mirror of https://github.com/oliverbooth/X10D synced 2024-11-26 12:58:49 +00:00

Move some intrinsic methods around, reimplement RuneExtensions.Repeat(Rune, Int32)

RealityProgrammer 2023-03-14 21:18:01 +07:00
parent 8b8aeb3f56
commit 77b0a8ca39
7 changed files with 292 additions and 250 deletions
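For orientation, a minimal sketch of the call-site change this diff implies; the vector values and variable names below are illustrative, not taken from the library:

using System.Runtime.Intrinsics;
using X10D.Core;

Vector128<byte> flags = Vector128.Create((byte)0, 1, 0, 255, 0, 7, 0, 1, 0, 0, 1, 1, 0, 0, 0, 9);

// Before this commit: static helpers on IntrinsicUtility.
// Vector128<byte> normalized = IntrinsicUtility.CorrectBoolean(flags);

// After this commit: extension methods on IntrinsicExtensions.
Vector128<byte> normalized = flags.CorrectBoolean();                     // every non-zero lane becomes 1, zero lanes stay 0
Vector128<ulong> swapped = Vector128.Create(1UL, 2UL).ReverseElements(); // elements come back as (2, 1)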


@ -45,6 +45,38 @@ public class RuneTests
Assert.AreEqual("a", repeated);
}
[TestMethod]
public void RepeatCodepoint_0000_007F_ShouldCorrect()
{
string repeated = new Rune(69).Repeat(16);
Assert.AreEqual(16, repeated.Length);
Assert.AreEqual("EEEEEEEEEEEEEEEE", repeated);
}
[TestMethod]
public void RepeatCodepoint_0080_07FF_ShouldCorrect()
{
string repeated = new Rune(192).Repeat(8);
Assert.AreEqual(8, repeated.Length);
Assert.AreEqual("ÀÀÀÀÀÀÀÀ", repeated);
}
[TestMethod]
public void RepeatCodepoint_0800_FFFF_ShouldCorrect()
{
string repeated = new Rune(0x0800).Repeat(5);
Assert.AreEqual(5, repeated.Length);
Assert.AreEqual("ࠀࠀࠀࠀࠀ", repeated);
}
[TestMethod]
public void RepeatCodepointBeyondU10000ShouldCorrect()
{
string repeated = new Rune('\uD800', '\uDC00').Repeat(6);
Assert.AreEqual(12, repeated.Length);
Assert.AreEqual("𐀀𐀀𐀀𐀀𐀀𐀀", repeated);
}
[TestMethod]
public void RepeatZeroCountShouldBeEmpty()
{


@ -19,7 +19,7 @@ namespace X10D {
// class via a tool like ResGen or Visual Studio.
// To add or remove a member, edit your .ResX file then rerun ResGen
// with the /str option, or rebuild your VS project.
[global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "16.0.0.0")]
[global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "17.0.0.0")]
[global::System.Diagnostics.DebuggerNonUserCodeAttribute()]
[global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()]
internal class Resource {
@ -77,5 +77,14 @@ namespace X10D {
return ResourceManager.GetString("EnumParseNotEnumException", resourceCulture);
}
}
/// <summary>
/// Looks up a localized string similar to Rune.Utf8SequenceLength returns value outside range 1 to 4 (inclusive), which is unexpected according to the official documentation..
/// </summary>
internal static string RuneUtf8SequenceLengthUnexpectedValue {
get {
return ResourceManager.GetString("RuneUtf8SequenceLengthUnexpectedValue", resourceCulture);
}
}
}
}


@ -123,4 +123,7 @@
<data name="EnumParseNotEnumException" xml:space="preserve">
<value>Type provided must be an Enum.</value>
</data>
<data name="RuneUtf8SequenceLengthUnexpectedValue" xml:space="preserve">
<value>Rune.Utf8SequenceLength returns value outside range 1 to 4 (inclusive), which is unexpected according to the official documentation.</value>
</data>
</root>


@ -1,6 +1,9 @@
#if NETCOREAPP3_0_OR_GREATER
using System.Diagnostics.Contracts;
using System.Runtime.CompilerServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;
namespace X10D.Core;
@ -10,6 +13,154 @@ namespace X10D.Core;
/// </summary>
public static class IntrinsicExtensions
{
// Got nothing for now.
/// <summary>
/// <para>
/// Correcting <see cref="Vector64{T}"/> of <see langword="byte"/> into 0 and 1 depend on their boolean truthiness.
/// </para>
/// Operation:<br/>
/// <code>
/// for (int i = 0; i &lt; 8; i++) {
/// dest[i] = vector[i] == 0 ? 0 : 1;
/// }
/// </code>
/// </summary>
/// <param name="vector">Vector of byte to correct.</param>
/// <returns>
/// A <see cref="Vector64{T}"/> of <see langword="byte"/> which remapped back to 0 and 1 based on boolean truthiness.
/// </returns>
[Pure]
[MethodImpl(MethodImplOptions.AggressiveInlining | MethodImplOptions.AggressiveOptimization)]
public static Vector64<byte> CorrectBoolean(this Vector64<byte> vector)
{
// TODO: AdvSimd implementation.
// TODO: WasmSimd implementation. (?)
var output = IntrinsicUtility.GetUninitializedVector64<byte>();
for (int i = 0; i < Vector64<byte>.Count; i++)
{
ref var writeElement = ref Unsafe.Add(ref Unsafe.As<Vector64<byte>, byte>(ref output), i);
#if NET7_0_OR_GREATER
writeElement = vector[i] == 0 ? (byte)0 : (byte)1;
#else
var element = Unsafe.Add(ref Unsafe.As<Vector64<byte>, byte>(ref vector), i);
writeElement = element == 0 ? (byte)0 : (byte)1;
#endif
}
return output;
}
/// <summary>
/// <para>
/// Correcting <see cref="Vector128{T}"/> of <see langword="byte"/> into 0 and 1 depend on their boolean truthiness.
/// </para>
/// Operation:<br/>
/// <code>
/// for (int i = 0; i &lt; 16; i++) {
/// dest[i] = vector[i] == 0 ? 0 : 1;
/// }
/// </code>
/// </summary>
/// <param name="vector">Vector of byte to correct.</param>
/// <returns>
/// A <see cref="Vector128{T}"/> of <see langword="byte"/> which remapped back to 0 and 1 based on boolean truthiness.
/// </returns>
[Pure]
[MethodImpl(MethodImplOptions.AggressiveInlining | MethodImplOptions.AggressiveOptimization)]
public static Vector128<byte> CorrectBoolean(this Vector128<byte> vector)
{
if (Sse2.IsSupported)
{
var cmp = Sse2.CompareEqual(vector, Vector128<byte>.Zero);
var result = Sse2.AndNot(cmp, Vector128.Create((byte)1));
return result;
}
// TODO: AdvSimd implementation.
// TODO: WasmSimd implementation.
var output = IntrinsicUtility.GetUninitializedVector128<byte>();
for (int i = 0; i < Vector128<byte>.Count; i++)
{
Unsafe.Add(ref Unsafe.As<Vector128<byte>, byte>(ref output), i) =
Unsafe.Add(ref Unsafe.As<Vector128<byte>, byte>(ref vector), i) == 0 ? (byte)0 : (byte)1;
}
return output;
}
/// <summary>
/// <para>
/// Correcting <see cref="Vector256{T}"/> of <see langword="byte"/> into 0 and 1 depend on their boolean truthiness.
/// </para>
/// Operation:<br/>
/// <code>
/// for (int i = 0; i &lt; 32; i++) {
/// dest[i] = vector[i] == 0 ? 0 : 1;
/// }
/// </code>
/// </summary>
/// <param name="vector">Vector of byte to correct.</param>
/// <returns>
/// A <see cref="Vector256{T}"/> of <see langword="byte"/> which remapped back to 0 and 1 based on boolean truthiness.
/// </returns>
[Pure]
[MethodImpl(MethodImplOptions.AggressiveInlining | MethodImplOptions.AggressiveOptimization)]
public static Vector256<byte> CorrectBoolean(this Vector256<byte> vector)
{
if (Avx2.IsSupported)
{
var cmp = Avx2.CompareEqual(vector, Vector256<byte>.Zero);
var result = Avx2.AndNot(cmp, Vector256.Create((byte)1));
return result;
}
var output = IntrinsicUtility.GetUninitializedVector256<byte>();
for (int i = 0; i < Vector256<byte>.Count; i++)
{
Unsafe.Add(ref Unsafe.As<Vector256<byte>, byte>(ref output), i) =
Unsafe.Add(ref Unsafe.As<Vector256<byte>, byte>(ref vector), i) == 0 ? (byte)0 : (byte)1;
}
return output;
}
/// <summary>
/// <para>
/// Reverse position of 2 64-bit unsigned integer.
/// </para>
/// Operation:<br/>
/// <code>
/// dest[1] = vector[0];
/// dest[0] = vector[1];
/// </code>
/// </summary>
/// <param name="vector">Input vector.</param>
/// <returns>
/// A <see cref="Vector128{T}"/> of <see langword="ulong"/> with elements the same as input vector except their positions
/// (or indices) are reversed.
/// </returns>
[Pure]
[CLSCompliant(false)]
[MethodImpl(MethodImplOptions.AggressiveInlining | MethodImplOptions.AggressiveOptimization)]
public static Vector128<ulong> ReverseElements(this Vector128<ulong> vector)
{
if (Sse2.IsSupported)
{
return Sse2.Shuffle(vector.AsDouble(), vector.AsDouble(), 0b01).AsUInt64();
}
Vector128<ulong> output = IntrinsicUtility.GetUninitializedVector128<ulong>();
Unsafe.As<Vector128<ulong>, ulong>(ref output) = Unsafe.Add(ref Unsafe.As<Vector128<ulong>, ulong>(ref vector), 1);
Unsafe.Add(ref Unsafe.As<Vector128<ulong>, ulong>(ref output), 1) = Unsafe.As<Vector128<ulong>, ulong>(ref vector);
return output;
}
}
#endif
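For intuition, a scalar sketch (not library code; the method name is made up) of what the Sse2.CompareEqual/AndNot fast path above computes for each byte lane, which matches the per-element fallback loop:

static byte CorrectBooleanScalar(byte value)
{
    // CompareEqual(value, 0) yields 0xFF for a zero lane and 0x00 otherwise;
    // AndNot(cmp, 1) then computes ~cmp & 1, i.e. 0 for zero lanes and 1 for everything else.
    byte cmp = value == 0 ? (byte)0xFF : (byte)0x00;
    return (byte)(~cmp & 1);
}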


@ -14,149 +14,14 @@ namespace X10D.Core;
public static class IntrinsicUtility
{
// NOTE:
// ANY METHOD THAT OPERATE ON ANYTHING THAT ISN'T FLOAT IS NOT SSE COMPATIBLE, MUST BE SSE2 AND BEYOND VERSION
// FOR API CONSISTENCY.
/// <summary>
/// <br>
/// <para>
/// Correcting <see cref="Vector64{T}"/> of <see langword="byte"/> into 0 and 1 depend on their boolean truthiness.
/// </br>
/// <br>Operation (raw):</br>
/// <code>
/// for (int i = 0; i &lt; 8; i++) {
/// dest[i] = ~(vector[i] == 0 ? 0xFF : 0x00) &amp; 1;
/// }
/// </code>
/// <br>Operation (simplified):</br>
/// <code>
/// for (int i = 0; i &lt; 8; i++) {
/// dest[i] = vector[i] == 0 ? 0 : 1;
/// }
/// </code>
/// </summary>
/// <param name="vector">Vector of byte to correct.</param>
/// <returns>
/// A <see cref="Vector64{T}"/> of <see langword="byte"/> which remapped back to 0 and 1 based on boolean truthiness.
/// </returns>
[Pure]
[MethodImpl(MethodImplOptions.AggressiveInlining | MethodImplOptions.AggressiveOptimization)]
public static Vector64<byte> CorrectBoolean(Vector64<byte> vector)
{
// TODO: AdvSimd implementation.
// TODO: WasmSimd implementation. (?)
var output = GetUninitializedVector64<byte>();
for (int i = 0; i < Vector64<byte>.Count; i++)
{
ref var writeElement = ref Unsafe.Add(ref Unsafe.As<Vector64<byte>, byte>(ref output), i);
#if NET7_0_OR_GREATER
writeElement = vector[i] == 0 ? (byte)0 : (byte)1;
#else
var element = Unsafe.Add(ref Unsafe.As<Vector64<byte>, byte>(ref vector), i);
writeElement = element == 0 ? (byte)0 : (byte)1;
#endif
}
return output;
}
/// <summary>
/// <br>
/// Correcting <see cref="Vector128{T}"/> of <see langword="byte"/> into 0 and 1 depend on their boolean truthiness.
/// </br>
/// <br>Operation (raw):</br>
/// <code>
/// for (int i = 0; i &lt; 16; i++) {
/// dest[i] = ~(vector[i] == 0 ? 0xFF : 0x00) &amp; 1;
/// }
/// </code>
/// <br>Operation (simplified):</br>
/// <code>
/// for (int i = 0; i &lt; 16; i++) {
/// dest[i] = vector[i] == 0 ? 0 : 1;
/// }
/// </code>
/// </summary>
/// <param name="vector">Vector of byte to correct.</param>
/// <returns>
/// A <see cref="Vector128{T}"/> of <see langword="byte"/> which remapped back to 0 and 1 based on boolean truthiness.
/// </returns>
[Pure]
[MethodImpl(MethodImplOptions.AggressiveInlining | MethodImplOptions.AggressiveOptimization)]
public static Vector128<byte> CorrectBoolean(Vector128<byte> vector)
{
if (Sse2.IsSupported)
{
var cmp = Sse2.CompareEqual(vector, Vector128<byte>.Zero);
var result = Sse2.AndNot(cmp, Vector128.Create((byte)1));
return result;
}
// TODO: AdvSimd implementation.
// TODO: WasmSimd implementation.
var output = GetUninitializedVector128<byte>();
for (int i = 0; i < Vector128<byte>.Count; i++)
{
Unsafe.Add(ref Unsafe.As<Vector128<byte>, byte>(ref output), i) =
Unsafe.Add(ref Unsafe.As<Vector128<byte>, byte>(ref vector), i) == 0 ? (byte)0 : (byte)1;
}
return output;
}
/// <summary>
/// <br>
/// Correcting <see cref="Vector256{T}"/> of <see langword="byte"/> into 0 and 1 depend on their boolean truthiness.
/// </br>
/// <br>Operation (raw):</br>
/// <code>
/// for (int i = 0; i &lt; 16; i++) {
/// dest[i] = ~(vector[i] == 0 ? 0xFF : 0x00) &amp; 1;
/// }
/// </code>
/// <br>Operation (simplified):</br>
/// <code>
/// for (int i = 0; i &lt; 16; i++) {
/// dest[i] = vector[i] == 0 ? 0 : 1;
/// }
/// </code>
/// </summary>
/// <param name="vector">Vector of byte to correct.</param>
/// <returns>
/// A <see cref="Vector256{T}"/> of <see langword="byte"/> which remapped back to 0 and 1 based on boolean truthiness.
/// </returns>
[Pure]
[MethodImpl(MethodImplOptions.AggressiveInlining | MethodImplOptions.AggressiveOptimization)]
public static Vector256<byte> CorrectBoolean(Vector256<byte> vector)
{
if (Avx2.IsSupported)
{
var cmp = Avx2.CompareEqual(vector, Vector256<byte>.Zero);
var result = Avx2.AndNot(cmp, Vector256.Create((byte)1));
return result;
}
var output = GetUninitializedVector256<byte>();
for (int i = 0; i < Vector256<byte>.Count; i++)
{
Unsafe.Add(ref Unsafe.As<Vector256<byte>, byte>(ref output), i) =
Unsafe.Add(ref Unsafe.As<Vector256<byte>, byte>(ref vector), i) == 0 ? (byte)0 : (byte)1;
}
return output;
}
/// <summary>
/// <br>
/// <para>
/// Multiply packed 64-bit unsigned integer elements in a and b and truncate the results to 64-bit integer.
/// </br>
/// </para>
/// <br>Operation:</br>
/// Operation:<br/>
/// <code>
/// dest[0] = lhs[0] * rhs[0];
/// dest[1] = lhs[1] * rhs[1];
@ -203,10 +68,10 @@ public static class IntrinsicUtility
}
/// <summary>
/// <br>
/// <para>
/// Multiply packed 64-bit unsigned integer elements in a and b and truncate the results to 64-bit integer.
/// </br>
/// </para>
/// <br>Operation:</br>
/// Operation:<br/>
/// <code>
/// dest[0] = lhs[0] * rhs[0];
/// dest[1] = lhs[1] * rhs[1];
@ -252,10 +117,10 @@ public static class IntrinsicUtility
}
/// <summary>
/// <br>
/// <para>
/// Multiply packed 64-bit signed integer elements in a and b and truncate the results to 64-bit integer.
/// </br>
/// </para>
/// <br>Operation:</br>
/// Operation:<br/>
/// <code>
/// dest[0] = lhs[0] * rhs[0];
/// dest[1] = lhs[1] * rhs[1];
@ -274,10 +139,10 @@ public static class IntrinsicUtility
}
/// <summary>
/// <br>
/// <para>
/// Multiply packed 64-bit signed integer elements in a and b and truncate the results to 64-bit integer.
/// </br>
/// </para>
/// <br>Operation:</br>
/// Operation:<br/>
/// <code>
/// dest[0] = lhs[0] * rhs[0];
/// dest[1] = lhs[1] * rhs[1];
@ -298,11 +163,11 @@ public static class IntrinsicUtility
}
/// <summary>
/// <br>
/// <para>
/// Horizontally apply OR operation on adjacent pairs of single-precision (32-bit) floating-point elements in lhs and
/// rhs.
/// </br>
/// </para>
/// <br>Operation:</br>
/// Operation:<br/>
/// <code>
/// dest[0] = lhs[0] | lhs[1];
/// dest[1] = lhs[2] | lhs[3];
@ -353,10 +218,10 @@ public static class IntrinsicUtility
}
/// <summary>
/// <br>
/// <para>
/// Horizontally apply OR operation on adjacent pairs of 32-bit integer elements in lhs and rhs.
/// </br>
/// </para>
/// <br>Operation:</br>
/// Operation:<br/>
/// <code>
/// dest[0] = lhs[0] | lhs[1];
/// dest[1] = lhs[2] | lhs[3];
@ -378,10 +243,10 @@ public static class IntrinsicUtility
}
/// <summary>
/// <br>
/// <para>
/// Horizontally apply OR operation on adjacent pairs of 32-bit unsigned integer elements in lhs and rhs.
/// </br>
/// </para>
/// <br>Operation:</br>
/// Operation:<br/>
/// <code>
/// dest[0] = lhs[0] | lhs[1];
/// dest[1] = lhs[2] | lhs[3];
@ -403,41 +268,9 @@ public static class IntrinsicUtility
return HorizontalOr(lhs.AsSingle(), rhs.AsSingle()).AsUInt32();
}
/// <summary>
/// <br>Reverse position of 2 64-bit unsigned integer.</br>
/// <br>Operation:</br>
/// <code>
/// ulong tmp = vector[0];
/// vector[0] = vector[1];
/// vector[1] = tmp;
/// </code>
/// </summary>
/// <param name="vector">Input vector.</param>
/// <returns>
/// A <see cref="Vector128{T}"/> of <see langword="ulong"/> with elements the same as input vector except their positions
/// (or indices) are reversed.
/// </returns>
[Pure]
[CLSCompliant(false)]
[MethodImpl(MethodImplOptions.AggressiveInlining | MethodImplOptions.AggressiveOptimization)]
public static Vector128<ulong> ReverseElements(Vector128<ulong> vector)
{
if (Sse2.IsSupported)
{
return Sse2.Shuffle(vector.AsDouble(), vector.AsDouble(), 0b01).AsUInt64();
}
Vector128<ulong> output = GetUninitializedVector128<ulong>();
Unsafe.As<Vector128<ulong>, ulong>(ref output) = Unsafe.Add(ref Unsafe.As<Vector128<ulong>, ulong>(ref vector), 1);
Unsafe.Add(ref Unsafe.As<Vector128<ulong>, ulong>(ref output), 1) = Unsafe.As<Vector128<ulong>, ulong>(ref vector);
return output;
}
// Helper methods
[MethodImpl(MethodImplOptions.AggressiveInlining | MethodImplOptions.AggressiveOptimization)]
private static Vector64<T> GetUninitializedVector64<T>() where T : struct
internal static Vector64<T> GetUninitializedVector64<T>() where T : struct
{
#if NET6_0_OR_GREATER
Unsafe.SkipInit(out Vector64<T> output);
@ -448,7 +281,7 @@ public static class IntrinsicUtility
}
[MethodImpl(MethodImplOptions.AggressiveInlining | MethodImplOptions.AggressiveOptimization)]
private static Vector128<T> GetUninitializedVector128<T>() where T : struct
internal static Vector128<T> GetUninitializedVector128<T>() where T : struct
{
#if NET6_0_OR_GREATER
Unsafe.SkipInit(out Vector128<T> output);
@ -459,7 +292,7 @@ public static class IntrinsicUtility
}
[MethodImpl(MethodImplOptions.AggressiveInlining | MethodImplOptions.AggressiveOptimization)]
private static Vector256<T> GetUninitializedVector256<T>() where T : struct
internal static Vector256<T> GetUninitializedVector256<T>() where T : struct
{
#if NET6_0_OR_GREATER
Unsafe.SkipInit(out Vector256<T> output);
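As a plain scalar restatement of the lane-wise multiply documented above (the method name is illustrative, not the library's implementation):

using System.Runtime.Intrinsics;

static Vector128<ulong> MultiplyScalarSketch(Vector128<ulong> lhs, Vector128<ulong> rhs)
{
    // dest[0] = lhs[0] * rhs[0]; dest[1] = lhs[1] * rhs[1]; products are truncated to 64 bits.
    return Vector128.Create(
        unchecked(lhs.GetElement(0) * rhs.GetElement(0)),
        unchecked(lhs.GetElement(1) * rhs.GetElement(1)));
}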


@ -173,9 +173,8 @@ public static class SpanExtensions
if (Sse2.IsSupported)
{
var load = Sse2.LoadScalarVector128((ulong*)pSource).AsByte();
var correct = IntrinsicUtility.CorrectBoolean(load);
return unchecked((byte)(IntegerPackingMagic * correct.AsUInt64().GetElement(0) >> 56));
return unchecked((byte)(IntegerPackingMagic * load.CorrectBoolean().AsUInt64().GetElement(0) >> 56));
}
// Probably should remove this piece of code because it is untested, but I see no reason why it should fail
@ -185,14 +184,11 @@ public static class SpanExtensions
{
// Hasn't been tested since March 6th 2023 (Reason: Unavailable hardware).
var load = AdvSimd.LoadVector64((byte*)pSource);
var correct = IntrinsicUtility.CorrectBoolean(load);
return unchecked((byte)(IntegerPackingMagic * correct.AsUInt64().GetElement(0) >> 56));
return unchecked((byte)(IntegerPackingMagic * load.CorrectBoolean().AsUInt64().GetElement(0) >> 56));
}
else
{
goto default;
} }
goto default;
}
#endif
default:
@ -252,10 +248,10 @@ public static class SpanExtensions
{
fixed (bool* pSource = source)
{
var load = Sse2.LoadVector128((byte*)pSource);
Vector128<byte> load = Sse2.LoadVector128((byte*)pSource);
var correct = IntrinsicUtility.CorrectBoolean(load).AsUInt64();
Vector128<ulong> correct = load.CorrectBoolean().AsUInt64();
var multiply = IntrinsicUtility.Multiply(IntegerPackingMagicV128, correct);
Vector128<ulong> multiply = IntrinsicUtility.Multiply(IntegerPackingMagicV128, correct);
var shift = Sse2.ShiftRightLogical(multiply, 56);
Vector128<ulong> shift = Sse2.ShiftRightLogical(multiply, 56);
return (short)(shift.GetElement(0) | (shift.GetElement(1) << 8));
}
@ -319,52 +315,52 @@ public static class SpanExtensions
{
if (Avx2.IsSupported)
{
var load = Avx.LoadVector256((byte*)pSource);
Vector256<byte> load = Avx.LoadVector256((byte*)pSource);
var correct = IntrinsicUtility.CorrectBoolean(load).AsUInt64();
Vector256<ulong> correct = load.CorrectBoolean().AsUInt64();
var multiply = IntrinsicUtility.Multiply(IntegerPackingMagicV256, correct);
Vector256<ulong> multiply = IntrinsicUtility.Multiply(IntegerPackingMagicV256, correct);
var shift = Avx2.ShiftRightLogical(multiply, 56);
Vector256<ulong> shift = Avx2.ShiftRightLogical(multiply, 56);
shift = Avx2.ShiftLeftLogicalVariable(shift, Vector256.Create(0UL, 8, 16, 24));
var p1 = Avx2.Permute4x64(shift, 0b10_11_00_01);
Vector256<ulong> p1 = Avx2.Permute4x64(shift, 0b10_11_00_01);
var or1 = Avx2.Or(shift, p1);
Vector256<ulong> or1 = Avx2.Or(shift, p1);
var p2 = Avx2.Permute4x64(or1, 0b00_00_10_10);
Vector256<ulong> p2 = Avx2.Permute4x64(or1, 0b00_00_10_10);
var or2 = Avx2.Or(or1, p2);
Vector256<ulong> or2 = Avx2.Or(or1, p2);
return (int)or2.GetElement(0);
}
if (Sse2.IsSupported)
{
var load = Sse2.LoadVector128((byte*)pSource);
Vector128<byte> load = Sse2.LoadVector128((byte*)pSource);
var correct = IntrinsicUtility.CorrectBoolean(load).AsUInt64();
Vector128<ulong> correct = load.CorrectBoolean().AsUInt64();
var multiply = IntrinsicUtility.Multiply(IntegerPackingMagicV128, correct);
Vector128<ulong> multiply = IntrinsicUtility.Multiply(IntegerPackingMagicV128, correct);
var shift1 = Sse2.ShiftRightLogical(multiply, 56);
Vector128<ulong> shift1 = Sse2.ShiftRightLogical(multiply, 56);
shift1 = Sse2.ShiftLeftLogical(shift1, Vector128.Create(0UL, 8UL));
load = Sse2.LoadVector128((byte*)(pSource + 16));
correct = IntrinsicUtility.CorrectBoolean(load).AsUInt64();
correct = load.CorrectBoolean().AsUInt64();
multiply = IntrinsicUtility.Multiply(IntegerPackingMagicV128, correct);
var shift2 = Sse2.ShiftRightLogical(multiply, 56);
Vector128<ulong> shift2 = Sse2.ShiftRightLogical(multiply, 56);
shift2 = Sse2.ShiftLeftLogical(shift2, Vector128.Create(16UL, 24UL));
var or1 = Sse2.Or(shift1, shift2);
Vector128<ulong> or1 = Sse2.Or(shift1, shift2);
var or2 = Sse2.Or(or1, IntrinsicUtility.ReverseElements(or1));
Vector128<ulong> or2 = Sse2.Or(or1, or1.ReverseElements());
return (int)or2.GetElement(0);
}
if (AdvSimd.IsSupported)
{
// Hasn't been tested since March 6th 2023 (Reason: Unavailable hardware).
var vector1 = IntrinsicUtility.CorrectBoolean(AdvSimd.LoadVector128((byte*)pSource)).AsUInt64();
Vector128<ulong> vector1 = AdvSimd.LoadVector128((byte*)pSource).CorrectBoolean().AsUInt64();
var vector2 = IntrinsicUtility.CorrectBoolean(AdvSimd.LoadVector128((byte*)(pSource + 16))).AsUInt64();
Vector128<ulong> vector2 = AdvSimd.LoadVector128((byte*)(pSource + 16)).CorrectBoolean().AsUInt64();
var calc1 = AdvSimd.ShiftRightLogical(IntrinsicUtility.Multiply(IntegerPackingMagicV128, vector1), 56);
Vector128<ulong> calc1 = AdvSimd.ShiftRightLogical(IntrinsicUtility.Multiply(IntegerPackingMagicV128, vector1), 56);
var calc2 = AdvSimd.ShiftRightLogical(IntrinsicUtility.Multiply(IntegerPackingMagicV128, vector2), 56);
Vector128<ulong> calc2 = AdvSimd.ShiftRightLogical(IntrinsicUtility.Multiply(IntegerPackingMagicV128, vector2), 56);
var shift1 = AdvSimd.ShiftLogical(calc1, Vector128.Create(0, 8));
Vector128<ulong> shift1 = AdvSimd.ShiftLogical(calc1, Vector128.Create(0, 8));
var shift2 = AdvSimd.ShiftLogical(calc2, Vector128.Create(16, 24));
Vector128<ulong> shift2 = AdvSimd.ShiftLogical(calc2, Vector128.Create(16, 24));
return (int)(shift1.GetElement(0) | shift1.GetElement(1) | shift2.GetElement(0) | shift2.GetElement(1));
}
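For intuition, a scalar sketch of the multiply-and-shift packing these branches rely on. The constant below is an assumption chosen so the trick works; the library's actual IntegerPackingMagic value is not visible in this diff:

static byte PackEightBools(ReadOnlySpan<bool> source)
{
    const ulong magic = 0x01_02_04_08_10_20_40_80; // assumed value: one set bit in each byte

    ulong lanes = 0;
    for (var i = 0; i < 8; i++)
    {
        // The CorrectBoolean step: every lane becomes exactly 0 or 1.
        lanes |= (ulong)(source[i] ? 1 : 0) << (8 * i);
    }

    // After the multiply, bit i of the top byte equals source[i]; shifting by 56 extracts that byte.
    return unchecked((byte)(magic * lanes >> 56));
}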


@ -1,5 +1,5 @@
#if NETCOREAPP3_0_OR_GREATER
using System;
using System.Diagnostics;
using System.Diagnostics.Contracts;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
@ -47,40 +47,58 @@ public static class RuneExtensions
}
// Helpful documentation: https://en.wikipedia.org/wiki/UTF-8
// This probably gonna break interning but whatever.
switch (value.Utf8SequenceLength)
{
case 1:
{
Unsafe.SkipInit(out byte bytes);
// Codepoint 0 to 0x00FF can be directly turn into char value without any conversion.
value.EncodeToUtf8(MemoryMarshal.CreateSpan(ref bytes, 1));
return new string((char)value.Value, count);
}
// Codepoint 0x0080 to 0x07FF takes 2 UTF-8 bytes, and it can be represented by 1 UTF-16 character (.NET runtime use
// UTF-16 encoding).
// Source: https://stackoverflow.com/questions/63905684
case 2:
// Codepoint 0x0800 to 0xFFFF takes 3 UTF-8 bytes, and can also be represented by 1 UTF-16 character.
case 3:
{
Span<byte> bytes = stackalloc byte[2];
// Codepoint 0x0080 to 0x07FF convert into 1 .NET character string, directly use string constructor.
value.EncodeToUtf8(bytes);
unsafe
{
Span<byte> bytes = stackalloc byte[value.Utf8SequenceLength];
value.EncodeToUtf8(bytes);
return new string(Encoding.UTF8.GetString(bytes)[0], count);
char character;
Encoding.UTF8.GetChars(bytes, new Span<char>(&character, 1));
return new string(character, count);
}
}
// Codepoint 0x10000 and beyond will takes **only** 2 UTF-16 character.
case 4:
{
return string.Create(count * 2, value, (span, rune) =>
{
unsafe {
Span<byte> bytes = stackalloc byte[4];
value.EncodeToUtf8(bytes);
int characters; // 2 characters, fit inside 1 32-bit integer.
Encoding.UTF8.GetChars(bytes, new Span<char>(&characters, 2));
MemoryMarshal.Cast<char, int>(span).Fill(characters);
}
});
} }
default:
{
#if NET7_0_OR_GREATER
int utf8SequenceLength = value.Utf8SequenceLength;
throw new UnreachableException(Resource.RuneUtf8SequenceLengthUnexpectedValue);
Span<byte> utf8 = stackalloc byte[utf8SequenceLength];
#else
value.EncodeToUtf8(utf8);
throw new InvalidOperationException(Resource.RuneUtf8SequenceLengthUnexpectedValue);
#endif
// Limit to maximum 1024 bytes stack allocation (Rune.Utf8SequenceLength return value in range of [1; 4])
Span<byte> buffer = count <= 256 ? stackalloc byte[utf8.Length * count] : new byte[utf8.Length * count];
for (var index = 0; index < count; index++)
{
utf8.CopyTo(buffer.Slice(index * utf8.Length, utf8.Length));
}
return Encoding.UTF8.GetString(buffer);
}
}
}
}
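A short usage sketch of the reimplemented Repeat, mirroring the expectations in the tests at the top of this diff (the X10D.Text namespace is assumed here; it is not visible in this excerpt):

using System.Text;
using X10D.Text; // assumed namespace for RuneExtensions

string ascii     = new Rune('E').Repeat(16);               // 1-byte UTF-8 sequence: "EEEEEEEEEEEEEEEE"
string twoByte   = new Rune(0x00C0).Repeat(8);             // 2-byte sequence: "ÀÀÀÀÀÀÀÀ"
string threeByte = new Rune(0x0800).Repeat(5);             // 3-byte sequence: length 5
string astral    = new Rune('\uD800', '\uDC00').Repeat(6); // 4-byte sequence, two UTF-16 chars per rune: length 12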