diff src/madplug/SFMT-alti.c @ 922:7e14701aef54 trunk

[svn] - replace the random number generator in the dithering code with the SIMD-oriented Fast Mersenne Twister (SFMT). This reduces CPU load on SSE2- and AltiVec-capable platforms.
author yaz
date Sun, 08 Apr 2007 21:30:22 -0700
parents
children fa7f7cd029af
line wrap: on
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/madplug/SFMT-alti.c	Sun Apr 08 21:30:22 2007 -0700
@@ -0,0 +1,126 @@
+/**
+ * This function represents the recursion formula in AltiVec and BIG ENDIAN.
+ * @param a a 128-bit part of the internal state array
+ * @param b a 128-bit part of the internal state array
+ * @param c a 128-bit part of the internal state array
+ * @param d a 128-bit part of the internal state array
+ * @return the next 128-bit value of the recursion
+ */
+inline static vector unsigned int vec_recursion(vector unsigned int a,
+						vector unsigned int b,
+						vector unsigned int c,
+						vector unsigned int d) {
+
+    const vector unsigned int sl1 = (vector unsigned int)(SL1, SL1, SL1, SL1);
+    const vector unsigned int sr1 = (vector unsigned int)(SR1, SR1, SR1, SR1);
+#ifdef ONLY64
+    const vector unsigned int mask =
+        (vector unsigned int)(MSK2, MSK1, MSK4, MSK3);
+    const vector unsigned char perm_sl = ALTI_SL2_PERM64;
+    const vector unsigned char perm_sr = ALTI_SR2_PERM64;
+#else
+    const vector unsigned int mask =
+        (vector unsigned int)(MSK1, MSK2, MSK3, MSK4);
+    const vector unsigned char perm_sl = ALTI_SL2_PERM;
+    const vector unsigned char perm_sr = ALTI_SR2_PERM;
+#endif
+    vector unsigned int v, w, x, y, z;
+    x = vec_perm(a, (vector unsigned int)perm_sl, perm_sl);
+    v = a;
+    y = vec_sr(b, sr1);
+    z = vec_perm(c, (vector unsigned int)perm_sr, perm_sr);
+    w = vec_sl(d, sl1);
+    z = vec_xor(z, w);
+    y = vec_and(y, mask);
+    v = vec_xor(v, x);
+    z = vec_xor(z, y);
+    z = vec_xor(z, v);
+    return z;
+}
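
For comparison, the portable (non-SIMD) build of SFMT expresses the same recursion in plain C. The sketch below is modelled on the reference implementation's do_recursion(); the w128_t union, the parameters SL1, SL2, SR1, SR2 and MSK1..MSK4, and the 128-bit byte-shift helpers lshift128()/rshift128() all live in the SFMT headers and SFMT.c, which this diff does not show, so treat those names as assumptions. The two vec_perm() calls above play the role of lshift128()/rshift128().

/* Portable sketch of the recursion vectorized by vec_recursion().
 * w128_t, SL1/SL2/SR1/SR2, MSK1..MSK4 and lshift128()/rshift128()
 * are assumed to come from the reference SFMT sources. */
inline static void do_recursion_sketch(w128_t *r, w128_t *a, w128_t *b,
                                       w128_t *c, w128_t *d) {
    w128_t x, y;

    lshift128(&x, a, SL2);   /* a shifted left by 8*SL2 bits across all 128 bits */
    rshift128(&y, c, SR2);   /* c shifted right by 8*SR2 bits across all 128 bits */
    r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0] ^ (d->u[0] << SL1);
    r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1] ^ (d->u[1] << SL1);
    r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2] ^ (d->u[2] << SL1);
    r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3] ^ (d->u[3] << SL1);
}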
+
+/**
+ * This function fills the internal state array with pseudorandom
+ * integers.
+ */
+inline static void gen_rand_all(void) {
+    int i;
+    vector unsigned int r, r1, r2;
+
+    r1 = sfmt[N - 2].s;
+    r2 = sfmt[N - 1].s;
+    for (i = 0; i < N - POS1; i++) {
+	r = vec_recursion(sfmt[i].s, sfmt[i + POS1].s, r1, r2);
+	sfmt[i].s = r;
+	r1 = r2;
+	r2 = r;
+    }
+    for (; i < N; i++) {
+	r = vec_recursion(sfmt[i].s, sfmt[i + POS1 - N].s, r1, r2);
+	sfmt[i].s = r;
+	r1 = r2;
+	r2 = r;
+    }
+}
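
gen_rand_all() only refreshes the internal state in place; callers obtain individual values through a small extractor on top of it. A minimal sketch, modelled on the reference SFMT gen_rand32() and assuming the psfmt32 pointer (a uint32_t view of sfmt[]), the idx cursor and the N32 == N * 4 constant maintained in SFMT.c, none of which appear in this diff:

/* Minimal 32-bit extractor on top of gen_rand_all().
 * psfmt32, idx and N32 are assumed from the surrounding SFMT.c;
 * the generator must have been seeded beforehand. */
inline static uint32_t gen_rand32_sketch(void) {
    uint32_t r;

    if (idx >= N32) {     /* buffered words exhausted: regenerate all of them */
        gen_rand_all();
        idx = 0;
    }
    r = psfmt32[idx++];   /* hand out the next buffered word */
    return r;
}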
+
+/**
+ * This function fills the user-specified array with pseudorandom
+ * integers.
+ *
+ * @param array a 128-bit array to be filled with pseudorandom numbers.
+ * @param size number of 128-bit pseudorandom numbers to be generated; must be at least N.
+ */
+inline static void gen_rand_array(w128_t array[], int size) {
+    int i, j;
+    vector unsigned int r, r1, r2;
+
+    r1 = sfmt[N - 2].s;
+    r2 = sfmt[N - 1].s;
+    for (i = 0; i < N - POS1; i++) {
+	r = vec_recursion(sfmt[i].s, sfmt[i + POS1].s, r1, r2);
+	array[i].s = r;
+	r1 = r2;
+	r2 = r;
+    }
+    for (; i < N; i++) {
+	r = vec_recursion(sfmt[i].s, array[i + POS1 - N].s, r1, r2);
+	array[i].s = r;
+	r1 = r2;
+	r2 = r;
+    }
+    /* main loop */
+    for (; i < size - N; i++) {
+	r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
+	array[i].s = r;
+	r1 = r2;
+	r2 = r;
+    }
+    for (j = 0; j < 2 * N - size; j++) {
+	sfmt[j].s = array[j + size - N].s;
+    }
+    for (; i < size; i++) {
+	r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
+	array[i].s = r;
+	sfmt[j++].s = r;
+	r1 = r2;
+	r2 = r;
+    }
+}
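
gen_rand_array() is normally reached through a wrapper that enforces its preconditions: the generator must be seeded, no partially consumed internal buffer may remain, the destination should be 16-byte aligned for the vector stores, and size must cover at least N 128-bit blocks. A sketch of such a wrapper, modelled on the reference SFMT fill_array32() and assuming the initialized flag, the idx cursor and N32 from SFMT.c, plus <assert.h>:

/* Sketch of a fill_array32()-style wrapper around gen_rand_array().
 * initialized, idx and N32 are assumed from the surrounding SFMT.c. */
inline static void fill_array32_sketch(uint32_t *array, int size) {
    assert(initialized);     /* init_gen_rand()/init_by_array() must have run */
    assert(idx == N32);      /* no partially consumed internal buffer */
    assert(size % 4 == 0);   /* whole 128-bit blocks only */
    assert(size >= N32);     /* gen_rand_array() needs at least N blocks */

    gen_rand_array((w128_t *)array, size / 4);
    idx = N32;               /* the internal state was refreshed as a side effect */
}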
+
+#ifndef ONLY64
+/**
+ * This function swaps the high and low 32-bit halves of each 64-bit
+ * integer in the user-specified array.
+ *
+ * @param array a 128-bit array to be swapped.
+ * @param size number of 128-bit elements in the array.
+ */
+inline static void swap(w128_t array[], int size) {
+    int i;
+    const vector unsigned char perm = (vector unsigned char)
+	(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11);
+
+    for (i = 0; i < size; i++) {
+	array[i].s = vec_perm(array[i].s, (vector unsigned int)perm, perm);
+    }
+}
+#endif
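
swap() compensates for the big-endian word order of the AltiVec state when the output is consumed as 64-bit integers and ONLY64 is not defined. In the reference SFMT it is invoked from fill_array64() right after the array has been generated; the sketch below assumes initialized, idx, N32, N64 (= N * 2) and the BIG_ENDIAN64 macro from SFMT.c / SFMT-params.h, plus <assert.h>:

/* Sketch of a fill_array64()-style wrapper showing where swap() is used.
 * initialized, idx, N32, N64 and BIG_ENDIAN64 are assumed from SFMT.c. */
inline static void fill_array64_sketch(uint64_t *array, int size) {
    assert(initialized);
    assert(idx == N32);      /* no partially consumed internal buffer */
    assert(size % 2 == 0);   /* whole 128-bit blocks only */
    assert(size >= N64);     /* at least N blocks of two 64-bit words */

    gen_rand_array((w128_t *)array, size / 2);
    idx = N32;

#if defined(BIG_ENDIAN64) && !defined(ONLY64)
    swap((w128_t *)array, size / 2);   /* make 64-bit reads match the canonical output order */
#endif
}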