aboutsummaryrefslogtreecommitdiff
path: root/src/rt
diff options
context:
space:
mode:
Diffstat (limited to 'src/rt')
-rw-r--r--src/rt/bigint/bigint.h294
-rw-r--r--src/rt/bigint/bigint_ext.cpp553
-rw-r--r--src/rt/bigint/bigint_int.cpp1428
-rw-r--r--src/rt/bigint/low_primes.h1069
-rw-r--r--src/rt/isaac/rand.h56
-rw-r--r--src/rt/isaac/randport.cpp134
-rw-r--r--src/rt/isaac/standard.h57
-rw-r--r--src/rt/memcheck.h309
-rw-r--r--src/rt/rust.cpp267
-rw-r--r--src/rt/rust.h49
-rw-r--r--src/rt/rust_builtin.cpp129
-rw-r--r--src/rt/rust_chan.cpp34
-rw-r--r--src/rt/rust_chan.h22
-rw-r--r--src/rt/rust_comm.cpp199
-rw-r--r--src/rt/rust_crate.cpp63
-rw-r--r--src/rt/rust_crate_cache.cpp306
-rw-r--r--src/rt/rust_crate_reader.cpp578
-rw-r--r--src/rt/rust_dom.cpp271
-rw-r--r--src/rt/rust_dwarf.h198
-rw-r--r--src/rt/rust_internal.h730
-rw-r--r--src/rt/rust_log.cpp117
-rw-r--r--src/rt/rust_log.h59
-rw-r--r--src/rt/rust_task.cpp474
-rw-r--r--src/rt/rust_timer.cpp97
-rw-r--r--src/rt/rust_upcall.cpp654
-rw-r--r--src/rt/rust_util.h155
-rw-r--r--src/rt/sync/fair_ticket_lock.cpp43
-rw-r--r--src/rt/sync/fair_ticket_lock.h15
-rw-r--r--src/rt/sync/lock_free_queue.cpp37
-rw-r--r--src/rt/sync/lock_free_queue.h15
-rw-r--r--src/rt/sync/spin_lock.cpp47
-rw-r--r--src/rt/sync/spin_lock.h14
-rw-r--r--src/rt/uthash/uthash.h766
-rw-r--r--src/rt/uthash/utlist.h280
-rw-r--r--src/rt/util/array_list.h69
-rw-r--r--src/rt/valgrind.h3926
36 files changed, 13514 insertions, 0 deletions
diff --git a/src/rt/bigint/bigint.h b/src/rt/bigint/bigint.h
new file mode 100644
index 00000000..b4c48f03
--- /dev/null
+++ b/src/rt/bigint/bigint.h
@@ -0,0 +1,294 @@
+/* bigint.h - include file for bigint package
+**
+** This library lets you do math on arbitrarily large integers. It's
+** pretty fast - compared with the multi-precision routines in the "bc"
+** calculator program, these routines are between two and twelve times faster,
+** except for division which is maybe half as fast.
+**
+** The calling convention is a little unusual. There's a basic problem
+** with writing a math library in a language that doesn't do automatic
+** garbage collection - what do you do about intermediate results?
+** You'd like to be able to write code like this:
+**
+** d = bi_sqrt( bi_add( bi_multiply( x, x ), bi_multiply( y, y ) ) );
+**
+** That works fine when the numbers being passed back and forth are
+** actual values - ints, floats, or even fixed-size structs. However,
+** when the numbers can be any size, as in this package, then you have
+** to pass them around as pointers to dynamically-allocated objects.
+** Those objects have to get de-allocated after you are done with them.
+** But how do you de-allocate the intermediate results in a complicated
+** multiple-call expression like the above?
+**
+** There are two common solutions to this problem. One, switch all your
+** code to a language that provides automatic garbage collection, for
+** example Java. This is a fine idea and I recommend you do it wherever
+** it's feasible. Two, change your routines to use a calling convention
+** that prevents people from writing multiple-call expressions like that.
+** The resulting code will be somewhat clumsy-looking, but it will work
+** just fine.
+**
+** This package uses a third method, which I haven't seen used anywhere
+** before. It's simple: each number can be used precisely once, after
+** which it is automatically de-allocated. This handles the anonymous
+** intermediate values perfectly. Named values still need to be copied
+** and freed explicitly. Here's the above example using this convention:
+**
+** d = bi_sqrt( bi_add(
+** bi_multiply( bi_copy( x ), bi_copy( x ) ),
+** bi_multiply( bi_copy( y ), bi_copy( y ) ) ) );
+** bi_free( x );
+** bi_free( y );
+**
+** Or, since the package contains a square routine, you could just write:
+**
+** d = bi_sqrt( bi_add( bi_square( x ), bi_square( y ) ) );
+**
+** This time the named values are only being used once, so you don't
+** have to copy and free them.
+**
+** This really works, however you do have to be very careful when writing
+** your code. If you leave out a bi_copy() and use a value more than once,
+** you'll get a runtime error about "zero refs" and a SIGFPE. Run your
+** code in a debugger, get a backtrace to see where the call was, and then
+** eyeball the code there to see where you need to add the bi_copy().
+**
+**
+** Copyright © 2000 by Jef Poskanzer <[email protected]>.
+** All rights reserved.
+**
+** Redistribution and use in source and binary forms, with or without
+** modification, are permitted provided that the following conditions
+** are met:
+** 1. Redistributions of source code must retain the above copyright
+** notice, this list of conditions and the following disclaimer.
+** 2. Redistributions in binary form must reproduce the above copyright
+** notice, this list of conditions and the following disclaimer in the
+** documentation and/or other materials provided with the distribution.
+**
+** THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+** IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+** ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+** FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+** DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+** OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+** HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+** LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+** OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+** SUCH DAMAGE.
+*/
+
+
+/* Type definition for bigints - it's an opaque type, the real definition
+** is in bigint.c.
+*/
+typedef void* bigint;
+
+
+/* Some convenient pre-initialized numbers. These are all permanent,
+** so you can use them as many times as you want without calling bi_copy().
+*/
+extern bigint bi_0, bi_1, bi_2, bi_10, bi_m1, bi_maxint, bi_minint;
+
+
+/* Initialize the bigint package. You must call this when your program
+** starts up.
+*/
+void bi_initialize( void );
+
+/* Shut down the bigint package. You should call this when your program
+** exits. It's not actually required, but it does do some consistency
+** checks which help keep your program bug-free, so you really ought
+** to call it.
+*/
+void bi_terminate( void );
+
+/* Run in unsafe mode, skipping most runtime checks. Slightly faster.
+** Once your code is debugged you can add this call after bi_initialize().
+*/
+void bi_no_check( void );
+
+/* Make a copy of a bigint. You must call this if you want to use a
+** bigint more than once. (Or you can make the bigint permanent.)
+** Note that this routine is very cheap - all it actually does is
+** increment a reference counter.
+*/
+bigint bi_copy( bigint bi );
+
+/* Make a bigint permanent, so it doesn't get automatically freed when
+** used as an operand.
+*/
+void bi_permanent( bigint bi );
+
+/* Undo bi_permanent(). The next use will free the bigint. */
+void bi_depermanent( bigint bi );
+
+/* Explicitly free a bigint. Normally bigints get freed automatically
+** when they are used as an operand. This routine lets you free one
+** without using it. If the bigint is permanent, this doesn't do
+** anything, you have to depermanent it first.
+*/
+void bi_free( bigint bi );
+
+/* Compare two bigints. Returns -1, 0, or 1. */
+int bi_compare( bigint bia, bigint bib );
+
+/* Convert an int to a bigint. */
+bigint int_to_bi( int i );
+
+/* Convert a string to a bigint. */
+bigint str_to_bi( char* str );
+
+/* Convert a bigint to an int. SIGFPE on overflow. */
+int bi_to_int( bigint bi );
+
+/* Write a bigint to a file. */
+void bi_print( FILE* f, bigint bi );
+
+/* Read a bigint from a file. */
+bigint bi_scan( FILE* f );
+
+
+/* Operations on a bigint and a regular int. */
+
+/* Add an int to a bigint. */
+bigint bi_int_add( bigint bi, int i );
+
+/* Subtract an int from a bigint. */
+bigint bi_int_subtract( bigint bi, int i );
+
+/* Multiply a bigint by an int. */
+bigint bi_int_multiply( bigint bi, int i );
+
+/* Divide a bigint by an int. SIGFPE on divide-by-zero. */
+bigint bi_int_divide( bigint binumer, int denom );
+
+/* Take the remainder of a bigint by an int, with an int result.
+** SIGFPE if m is zero.
+*/
+int bi_int_rem( bigint bi, int m );
+
+/* Take the modulus of a bigint by an int, with an int result.
+** Note that mod is not rem: mod is always within [0..m), while
+** rem can be negative. SIGFPE if m is zero or negative.
+*/
+int bi_int_mod( bigint bi, int m );
+
+
+/* Basic operations on two bigints. */
+
+/* Add two bigints. */
+bigint bi_add( bigint bia, bigint bib );
+
+/* Subtract bib from bia. */
+bigint bi_subtract( bigint bia, bigint bib );
+
+/* Multiply two bigints. */
+bigint bi_multiply( bigint bia, bigint bib );
+
+/* Divide one bigint by another. SIGFPE on divide-by-zero. */
+bigint bi_divide( bigint binumer, bigint bidenom );
+
+/* Binary division of one bigint by another. SIGFPE on divide-by-zero.
+** This is here just for testing. It's about five times slower than
+** regular division.
+*/
+bigint bi_binary_divide( bigint binumer, bigint bidenom );
+
+/* Take the remainder of one bigint by another. SIGFPE if bim is zero. */
+bigint bi_rem( bigint bia, bigint bim );
+
+/* Take the modulus of one bigint by another. Note that mod is not rem:
+** mod is always within [0..bim), while rem can be negative. SIGFPE if
+** bim is zero or negative.
+*/
+bigint bi_mod( bigint bia, bigint bim );
+
+
+/* Some less common operations. */
+
+/* Negate a bigint. */
+bigint bi_negate( bigint bi );
+
+/* Absolute value of a bigint. */
+bigint bi_abs( bigint bi );
+
+/* Divide a bigint in half. */
+bigint bi_half( bigint bi );
+
+/* Multiply a bigint by two. */
+bigint bi_double( bigint bi );
+
+/* Square a bigint. */
+bigint bi_square( bigint bi );
+
+/* Raise bi to the power of biexp. SIGFPE if biexp is negative. */
+bigint bi_power( bigint bi, bigint biexp );
+
+/* Integer square root. */
+bigint bi_sqrt( bigint bi );
+
+/* Factorial. */
+bigint bi_factorial( bigint bi );
+
+
+/* Some predicates. */
+
+/* 1 if the bigint is odd, 0 if it's even. */
+int bi_is_odd( bigint bi );
+
+/* 1 if the bigint is even, 0 if it's odd. */
+int bi_is_even( bigint bi );
+
+/* 1 if the bigint equals zero, 0 if it's nonzero. */
+int bi_is_zero( bigint bi );
+
+/* 1 if the bigint equals one, 0 otherwise. */
+int bi_is_one( bigint bi );
+
+/* 1 if the bigint is less than zero, 0 if it's zero or greater. */
+int bi_is_negative( bigint bi );
+
+
+/* Now we get into the esoteric number-theory stuff used for cryptography. */
+
+/* Modular exponentiation. Much faster than bi_mod(bi_power(bi,biexp),bim).
+** Also, biexp can be negative.
+*/
+bigint bi_mod_power( bigint bi, bigint biexp, bigint bim );
+
+/* Modular inverse. mod( bi * modinv(bi), bim ) == 1. SIGFPE if bi is not
+** relatively prime to bim.
+*/
+bigint bi_mod_inverse( bigint bi, bigint bim );
+
+/* Produce a random number in the half-open interval [0..bi). You need
+** to have called srandom() before using this.
+*/
+bigint bi_random( bigint bi );
+
+/* Greatest common divisor of two bigints. Euclid's algorithm. */
+bigint bi_gcd( bigint bim, bigint bin );
+
+/* Greatest common divisor of two bigints, plus the corresponding multipliers.
+** Extended Euclid's algorithm.
+*/
+bigint bi_egcd( bigint bim, bigint bin, bigint* bim_mul, bigint* bin_mul );
+
+/* Least common multiple of two bigints. */
+bigint bi_lcm( bigint bia, bigint bib );
+
+/* The Jacobi symbol. SIGFPE if bib is even. */
+bigint bi_jacobi( bigint bia, bigint bib );
+
+/* Probabilistic prime checking. A non-zero return means the probability
+** that bi is prime is at least 1 - 1/2 ^ certainty.
+*/
+int bi_is_probable_prime( bigint bi, int certainty );
+
+/* Random probabilistic prime with the specified number of bits. */
+bigint bi_generate_prime( int bits, int certainty );
+
+/* Number of bits in the number. The log base 2, approximately. */
+int bi_bits( bigint bi );
diff --git a/src/rt/bigint/bigint_ext.cpp b/src/rt/bigint/bigint_ext.cpp
new file mode 100644
index 00000000..66d79106
--- /dev/null
+++ b/src/rt/bigint/bigint_ext.cpp
@@ -0,0 +1,553 @@
+/* bigint_ext - external portion of large integer package
+**
+** Copyright © 2000 by Jef Poskanzer <[email protected]>.
+** All rights reserved.
+**
+** Redistribution and use in source and binary forms, with or without
+** modification, are permitted provided that the following conditions
+** are met:
+** 1. Redistributions of source code must retain the above copyright
+** notice, this list of conditions and the following disclaimer.
+** 2. Redistributions in binary form must reproduce the above copyright
+** notice, this list of conditions and the following disclaimer in the
+** documentation and/or other materials provided with the distribution.
+**
+** THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+** IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+** ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+** FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+** DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+** OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+** HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+** LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+** OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+** SUCH DAMAGE.
+*/
+
+#include <sys/types.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <time.h>
+
+#include "bigint.h"
+#include "low_primes.h"
+
+
+bigint bi_0, bi_1, bi_2, bi_10, bi_m1, bi_maxint, bi_minint;
+
+
+/* Forwards. */
+static void print_pos( FILE* f, bigint bi );
+
+
+bigint
+str_to_bi( char* str )
+ {
+ int sign;
+ bigint biR;
+
+ sign = 1;
+ if ( *str == '-' )
+ {
+ sign = -1;
+ ++str;
+ }
+ for ( biR = bi_0; *str >= '0' && *str <= '9'; ++str )
+ biR = bi_int_add( bi_int_multiply( biR, 10 ), *str - '0' );
+ if ( sign == -1 )
+ biR = bi_negate( biR );
+ return biR;
+ }
+
+
+void
+bi_print( FILE* f, bigint bi )
+ {
+ if ( bi_is_negative( bi_copy( bi ) ) )
+ {
+ putc( '-', f );
+ bi = bi_negate( bi );
+ }
+ print_pos( f, bi );
+ }
+
+
+bigint
+bi_scan( FILE* f )
+ {
+ int sign;
+ int c;
+ bigint biR;
+
+ sign = 1;
+ c = getc( f );
+ if ( c == '-' )
+ sign = -1;
+ else
+ ungetc( c, f );
+
+ biR = bi_0;
+ for (;;)
+ {
+ c = getc( f );
+ if ( c < '0' || c > '9' )
+ break;
+ biR = bi_int_add( bi_int_multiply( biR, 10 ), c - '0' );
+ }
+
+ if ( sign == -1 )
+ biR = bi_negate( biR );
+ return biR;
+ }
+
+
+static void
+print_pos( FILE* f, bigint bi )
+ {
+ if ( bi_compare( bi_copy( bi ), bi_10 ) >= 0 )
+ print_pos( f, bi_int_divide( bi_copy( bi ), 10 ) );
+ putc( bi_int_mod( bi, 10 ) + '0', f );
+ }
+
+
+int
+bi_int_mod( bigint bi, int m )
+ {
+ int r;
+
+ if ( m <= 0 )
+ {
+ (void) fprintf( stderr, "bi_int_mod: zero or negative modulus\n" );
+ (void) kill( getpid(), SIGFPE );
+ }
+ r = bi_int_rem( bi, m );
+ if ( r < 0 )
+ r += m;
+ return r;
+ }
+
+
+bigint
+bi_rem( bigint bia, bigint bim )
+ {
+ return bi_subtract(
+ bia, bi_multiply( bi_divide( bi_copy( bia ), bi_copy( bim ) ), bim ) );
+ }
+
+
+bigint
+bi_mod( bigint bia, bigint bim )
+ {
+ bigint biR;
+
+ if ( bi_compare( bi_copy( bim ), bi_0 ) <= 0 )
+ {
+ (void) fprintf( stderr, "bi_mod: zero or negative modulus\n" );
+ (void) kill( getpid(), SIGFPE );
+ }
+ biR = bi_rem( bia, bi_copy( bim ) );
+ if ( bi_is_negative( bi_copy( biR ) ) )
+ biR = bi_add( biR, bim );
+ else
+ bi_free( bim );
+ return biR;
+ }
+
+
+bigint
+bi_square( bigint bi )
+ {
+ bigint biR;
+
+ biR = bi_multiply( bi_copy( bi ), bi_copy( bi ) );
+ bi_free( bi );
+ return biR;
+ }
+
+
+bigint
+bi_power( bigint bi, bigint biexp )
+ {
+ bigint biR;
+
+ if ( bi_is_negative( bi_copy( biexp ) ) )
+ {
+ (void) fprintf( stderr, "bi_power: negative exponent\n" );
+ (void) kill( getpid(), SIGFPE );
+ }
+ biR = bi_1;
+ for (;;)
+ {
+ if ( bi_is_odd( bi_copy( biexp ) ) )
+ biR = bi_multiply( biR, bi_copy( bi ) );
+ biexp = bi_half( biexp );
+ if ( bi_compare( bi_copy( biexp ), bi_0 ) <= 0 )
+ break;
+ bi = bi_multiply( bi_copy( bi ), bi );
+ }
+ bi_free( bi );
+ bi_free( biexp );
+ return biR;
+ }
+
+
+bigint
+bi_factorial( bigint bi )
+ {
+ bigint biR;
+
+ biR = bi_1;
+ while ( bi_compare( bi_copy( bi ), bi_1 ) > 0 )
+ {
+ biR = bi_multiply( biR, bi_copy( bi ) );
+ bi = bi_int_subtract( bi, 1 );
+ }
+ bi_free( bi );
+ return biR;
+ }
+
+
+int
+bi_is_even( bigint bi )
+ {
+ return ! bi_is_odd( bi );
+ }
+
+
+bigint
+bi_mod_power( bigint bi, bigint biexp, bigint bim )
+ {
+ int invert;
+ bigint biR;
+
+ invert = 0;
+ if ( bi_is_negative( bi_copy( biexp ) ) )
+ {
+ biexp = bi_negate( biexp );
+ invert = 1;
+ }
+
+ biR = bi_1;
+ for (;;)
+ {
+ if ( bi_is_odd( bi_copy( biexp ) ) )
+ biR = bi_mod( bi_multiply( biR, bi_copy( bi ) ), bi_copy( bim ) );
+ biexp = bi_half( biexp );
+ if ( bi_compare( bi_copy( biexp ), bi_0 ) <= 0 )
+ break;
+ bi = bi_mod( bi_multiply( bi_copy( bi ), bi ), bi_copy( bim ) );
+ }
+ bi_free( bi );
+ bi_free( biexp );
+
+ if ( invert )
+ biR = bi_mod_inverse( biR, bim );
+ else
+ bi_free( bim );
+ return biR;
+ }
+
+
+bigint
+bi_mod_inverse( bigint bi, bigint bim )
+ {
+ bigint gcd, mul0, mul1;
+
+ gcd = bi_egcd( bi_copy( bim ), bi, &mul0, &mul1 );
+
+ /* Did we get gcd == 1? */
+ if ( ! bi_is_one( gcd ) )
+ {
+ (void) fprintf( stderr, "bi_mod_inverse: not relatively prime\n" );
+ (void) kill( getpid(), SIGFPE );
+ }
+
+ bi_free( mul0 );
+ return bi_mod( mul1, bim );
+ }
+
+
+/* Euclid's algorithm. */
+bigint
+bi_gcd( bigint bim, bigint bin )
+ {
+ bigint bit;
+
+ bim = bi_abs( bim );
+ bin = bi_abs( bin );
+ while ( ! bi_is_zero( bi_copy( bin ) ) )
+ {
+ bit = bi_mod( bim, bi_copy( bin ) );
+ bim = bin;
+ bin = bit;
+ }
+ bi_free( bin );
+ return bim;
+ }
+
+
+/* Extended Euclidean algorithm. */
+bigint
+bi_egcd( bigint bim, bigint bin, bigint* bim_mul, bigint* bin_mul )
+ {
+ bigint a0, b0, c0, a1, b1, c1, q, t;
+
+ if ( bi_is_negative( bi_copy( bim ) ) )
+ {
+ bigint biR;
+
+ biR = bi_egcd( bi_negate( bim ), bin, &t, bin_mul );
+ *bim_mul = bi_negate( t );
+ return biR;
+ }
+ if ( bi_is_negative( bi_copy( bin ) ) )
+ {
+ bigint biR;
+
+ biR = bi_egcd( bim, bi_negate( bin ), bim_mul, &t );
+ *bin_mul = bi_negate( t );
+ return biR;
+ }
+
+ a0 = bi_1; b0 = bi_0; c0 = bim;
+ a1 = bi_0; b1 = bi_1; c1 = bin;
+
+ while ( ! bi_is_zero( bi_copy( c1 ) ) )
+ {
+ q = bi_divide( bi_copy( c0 ), bi_copy( c1 ) );
+ t = a0;
+ a0 = bi_copy( a1 );
+ a1 = bi_subtract( t, bi_multiply( bi_copy( q ), a1 ) );
+ t = b0;
+ b0 = bi_copy( b1 );
+ b1 = bi_subtract( t, bi_multiply( bi_copy( q ), b1 ) );
+ t = c0;
+ c0 = bi_copy( c1 );
+ c1 = bi_subtract( t, bi_multiply( bi_copy( q ), c1 ) );
+ bi_free( q );
+ }
+
+ bi_free( a1 );
+ bi_free( b1 );
+ bi_free( c1 );
+ *bim_mul = a0;
+ *bin_mul = b0;
+ return c0;
+ }
+
+
+bigint
+bi_lcm( bigint bia, bigint bib )
+ {
+ bigint biR;
+
+ biR = bi_divide(
+ bi_multiply( bi_copy( bia ), bi_copy( bib ) ),
+ bi_gcd( bi_copy( bia ), bi_copy( bib ) ) );
+ bi_free( bia );
+ bi_free( bib );
+ return biR;
+ }
+
+
+/* The Jacobi symbol. */
+bigint
+bi_jacobi( bigint bia, bigint bib )
+ {
+ bigint biR;
+
+ if ( bi_is_even( bi_copy( bib ) ) )
+ {
+ (void) fprintf( stderr, "bi_jacobi: don't know how to compute Jacobi(n, even)\n" );
+ (void) kill( getpid(), SIGFPE );
+ }
+
+ if ( bi_compare( bi_copy( bia ), bi_copy( bib ) ) >= 0 )
+ return bi_jacobi( bi_mod( bia, bi_copy( bib ) ), bib );
+
+ if ( bi_is_zero( bi_copy( bia ) ) || bi_is_one( bi_copy( bia ) ) )
+ {
+ bi_free( bib );
+ return bia;
+ }
+
+ if ( bi_compare( bi_copy( bia ), bi_2 ) == 0 )
+ {
+ bi_free( bia );
+ switch ( bi_int_mod( bib, 8 ) )
+ {
+ case 1: case 7:
+ return bi_1;
+ case 3: case 5:
+ return bi_m1;
+ }
+ }
+
+ if ( bi_is_even( bi_copy( bia ) ) )
+ {
+ biR = bi_multiply(
+ bi_jacobi( bi_2, bi_copy( bib ) ),
+ bi_jacobi( bi_half( bia ), bi_copy( bib ) ) );
+ bi_free( bib );
+ return biR;
+ }
+
+ if ( bi_int_mod( bi_copy( bia ), 4 ) == 3 &&
+ bi_int_mod( bi_copy( bib ), 4 ) == 3 )
+ return bi_negate( bi_jacobi( bib, bia ) );
+ else
+ return bi_jacobi( bib, bia );
+ }
+
+
+/* Probabilistic prime checking. */
+int
+bi_is_probable_prime( bigint bi, int certainty )
+ {
+ int i, p;
+ bigint bim1;
+
+ /* First do trial division by a list of small primes. This eliminates
+ ** many candidates.
+ */
+ for ( i = 0; i < sizeof(low_primes)/sizeof(*low_primes); ++i )
+ {
+ p = low_primes[i];
+ switch ( bi_compare( int_to_bi( p ), bi_copy( bi ) ) )
+ {
+ case 0:
+ bi_free( bi );
+ return 1;
+ case 1:
+ bi_free( bi );
+ return 0;
+ }
+ if ( bi_int_mod( bi_copy( bi ), p ) == 0 )
+ {
+ bi_free( bi );
+ return 0;
+ }
+ }
+
+ /* Now do the probabilistic tests. */
+ bim1 = bi_int_subtract( bi_copy( bi ), 1 );
+ for ( i = 0; i < certainty; ++i )
+ {
+ bigint a, j, jac;
+
+ /* Pick random test number. */
+ a = bi_random( bi_copy( bi ) );
+
+ /* Decide whether to run the Fermat test or the Solovay-Strassen
+ ** test. The Fermat test is fast but lets some composite numbers
+ ** through. Solovay-Strassen runs slower but is more certain.
+ ** So the compromise here is we run the Fermat test a couple of
+ ** times to quickly reject most composite numbers, and then do
+ ** the rest of the iterations with Solovay-Strassen so nothing
+ ** slips through.
+ */
+ if ( i < 2 && certainty >= 5 )
+ {
+ /* Fermat test. Note that this is not state of the art. There's a
+ ** class of numbers called Carmichael numbers which are composite
+ ** but look prime to this test - it lets them slip through no
+ ** matter how many reps you run. However, it's nice and fast so
+ ** we run it anyway to help quickly reject most of the composites.
+ */
+ if ( ! bi_is_one( bi_mod_power( bi_copy( a ), bi_copy( bim1 ), bi_copy( bi ) ) ) )
+ {
+ bi_free( bi );
+ bi_free( bim1 );
+ bi_free( a );
+ return 0;
+ }
+ }
+ else
+ {
+ /* GCD test. This rarely hits, but we need it for Solovay-Strassen. */
+ if ( ! bi_is_one( bi_gcd( bi_copy( bi ), bi_copy( a ) ) ) )
+ {
+ bi_free( bi );
+ bi_free( bim1 );
+ bi_free( a );
+ return 0;
+ }
+
+ /* Solovay-Strassen test. First compute pseudo Jacobi. */
+ j = bi_mod_power(
+ bi_copy( a ), bi_half( bi_copy( bim1 ) ), bi_copy( bi ) );
+ if ( bi_compare( bi_copy( j ), bi_copy( bim1 ) ) == 0 )
+ {
+ bi_free( j );
+ j = bi_m1;
+ }
+
+ /* Now compute real Jacobi. */
+ jac = bi_jacobi( bi_copy( a ), bi_copy( bi ) );
+
+ /* If they're not equal, the number is definitely composite. */
+ if ( bi_compare( j, jac ) != 0 )
+ {
+ bi_free( bi );
+ bi_free( bim1 );
+ bi_free( a );
+ return 0;
+ }
+ }
+
+ bi_free( a );
+ }
+
+ bi_free( bim1 );
+
+ bi_free( bi );
+ return 1;
+ }
+
+
+bigint
+bi_generate_prime( int bits, int certainty )
+ {
+ bigint bimo2, bip;
+ int i, inc = 0;
+
+ bimo2 = bi_power( bi_2, int_to_bi( bits - 1 ) );
+ for (;;)
+ {
+ bip = bi_add( bi_random( bi_copy( bimo2 ) ), bi_copy( bimo2 ) );
+ /* By shoving the candidate numbers up to the next highest multiple
+ ** of six plus or minus one, we pre-eliminate all multiples of
+ ** two and/or three.
+ */
+ switch ( bi_int_mod( bi_copy( bip ), 6 ) )
+ {
+ case 0: inc = 4; bip = bi_int_add( bip, 1 ); break;
+ case 1: inc = 4; break;
+ case 2: inc = 2; bip = bi_int_add( bip, 3 ); break;
+ case 3: inc = 2; bip = bi_int_add( bip, 2 ); break;
+ case 4: inc = 2; bip = bi_int_add( bip, 1 ); break;
+ case 5: inc = 2; break;
+ }
+ /* Starting from the generated random number, check a bunch of
+ ** numbers in sequence. This is just to avoid calls to bi_random(),
+ ** which is more expensive than a simple add.
+ */
+ for ( i = 0; i < 1000; ++i ) /* arbitrary */
+ {
+ if ( bi_is_probable_prime( bi_copy( bip ), certainty ) )
+ {
+ bi_free( bimo2 );
+ return bip;
+ }
+ bip = bi_int_add( bip, inc );
+ inc = 6 - inc;
+ }
+ /* We ran through the whole sequence and didn't find a prime.
+ ** Shrug, just try a different random starting point.
+ */
+ bi_free( bip );
+ }
+ }
diff --git a/src/rt/bigint/bigint_int.cpp b/src/rt/bigint/bigint_int.cpp
new file mode 100644
index 00000000..194ddcb5
--- /dev/null
+++ b/src/rt/bigint/bigint_int.cpp
@@ -0,0 +1,1428 @@
+/* bigint - internal portion of large integer package
+**
+** Copyright © 2000 by Jef Poskanzer <[email protected]>.
+** All rights reserved.
+**
+** Redistribution and use in source and binary forms, with or without
+** modification, are permitted provided that the following conditions
+** are met:
+** 1. Redistributions of source code must retain the above copyright
+** notice, this list of conditions and the following disclaimer.
+** 2. Redistributions in binary form must reproduce the above copyright
+** notice, this list of conditions and the following disclaimer in the
+** documentation and/or other materials provided with the distribution.
+**
+** THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+** IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+** ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+** FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+** DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+** OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+** HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+** LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+** OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+** SUCH DAMAGE.
+*/
+
+#include <sys/types.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <time.h>
+
+#include "bigint.h"
+
+#define max(a,b) ((a)>(b)?(a):(b))
+#define min(a,b) ((a)<(b)?(a):(b))
+
+/* MAXINT and MININT extracted from <values.h>, which gives a warning
+** message if included.
+*/
+#define BITSPERBYTE 8
+#define BITS(type) (BITSPERBYTE * (int)sizeof(type))
+#define INTBITS BITS(int)
+#define MININT (1 << (INTBITS - 1))
+#define MAXINT (~MININT)
+
+
+/* The package represents arbitrary-precision integers as a sign and a sum
+** of components multiplied by successive powers of the basic radix, i.e.:
+**
+** sign * ( comp0 + comp1 * radix + comp2 * radix^2 + comp3 * radix^3 )
+**
+** To make good use of the computer's word size, the radix is chosen
+** to be a power of two. It could be chosen to be the full word size,
+** however this would require a lot of finagling in the middle of the
+** algorithms to get the inter-word overflows right. That would slow things
+** down. Instead, the radix is chosen to be *half* the actual word size.
+** With just a little care, this means the words can hold all intermediate
+** values, and the overflows can be handled all at once at the end, in a
+** normalization step. This simplifies the coding enormously, and is probably
+** somewhat faster to run. The cost is that numbers use twice as much
+** storage as they would with the most efficient representation, but storage
+** is cheap.
+**
+** A few more notes on the representation:
+**
+** - The sign is always 1 or -1, never 0. The number 0 is represented
+** with a sign of 1.
+** - The components are signed numbers, to allow for negative intermediate
+** values. After normalization, all components are >= 0 and the sign is
+** updated.
+*/
+
+/* Type definition for bigints. */
+typedef int64_t comp; /* should be the largest signed int type you have */
+struct _real_bigint {
+ int refs;
+ struct _real_bigint* next;
+ int num_comps, max_comps;
+ int sign;
+ comp* comps;
+ };
+typedef struct _real_bigint* real_bigint;
+
+
+#undef DUMP
+
+
+#define PERMANENT 123456789
+
+static comp bi_radix, bi_radix_o2;
+static int bi_radix_sqrt, bi_comp_bits;
+
+static real_bigint active_list, free_list;
+static int active_count, free_count;
+static int check_level;
+
+
+/* Forwards. */
+static bigint regular_multiply( real_bigint bia, real_bigint bib );
+static bigint multi_divide( bigint binumer, real_bigint bidenom );
+static bigint multi_divide2( bigint binumer, real_bigint bidenom );
+static void more_comps( real_bigint bi, int n );
+static real_bigint alloc( int num_comps );
+static real_bigint clone( real_bigint bi );
+static void normalize( real_bigint bi );
+static void check( real_bigint bi );
+static void double_check( void );
+static void triple_check( void );
+#ifdef DUMP
+static void dump( char* str, bigint bi );
+#endif /* DUMP */
+static int csqrt( comp c );
+static int cbits( comp c );
+
+
+void
+bi_initialize( void )
+ {
+ /* Set the radix. This does not actually have to be a power of
+ ** two, that's just the most efficient value. It does have to
+ ** be even for bi_half() to work.
+ */
+ bi_radix = 1;
+ bi_radix <<= BITS(comp) / 2 - 1;
+
+ /* Halve the radix. Only used by bi_half(). */
+ bi_radix_o2 = bi_radix >> 1;
+
+ /* Take the square root of the radix. Only used by bi_divide(). */
+ bi_radix_sqrt = csqrt( bi_radix );
+
+ /* Figure out how many bits in a component. Only used by bi_bits(). */
+ bi_comp_bits = cbits( bi_radix - 1 );
+
+ /* Init various globals. */
+ active_list = (real_bigint) 0;
+ active_count = 0;
+ free_list = (real_bigint) 0;
+ free_count = 0;
+
+ /* This can be 0 through 3. */
+ check_level = 3;
+
+ /* Set up some convenient bigints. */
+ bi_0 = int_to_bi( 0 ); bi_permanent( bi_0 );
+ bi_1 = int_to_bi( 1 ); bi_permanent( bi_1 );
+ bi_2 = int_to_bi( 2 ); bi_permanent( bi_2 );
+ bi_10 = int_to_bi( 10 ); bi_permanent( bi_10 );
+ bi_m1 = int_to_bi( -1 ); bi_permanent( bi_m1 );
+ bi_maxint = int_to_bi( MAXINT ); bi_permanent( bi_maxint );
+ bi_minint = int_to_bi( MININT ); bi_permanent( bi_minint );
+ }
+
+
+void
+bi_terminate( void )
+ {
+ real_bigint p, pn;
+
+ bi_depermanent( bi_0 ); bi_free( bi_0 );
+ bi_depermanent( bi_1 ); bi_free( bi_1 );
+ bi_depermanent( bi_2 ); bi_free( bi_2 );
+ bi_depermanent( bi_10 ); bi_free( bi_10 );
+ bi_depermanent( bi_m1 ); bi_free( bi_m1 );
+ bi_depermanent( bi_maxint ); bi_free( bi_maxint );
+ bi_depermanent( bi_minint ); bi_free( bi_minint );
+
+ if ( active_count != 0 )
+ (void) fprintf(
+ stderr, "bi_terminate: there were %d un-freed bigints\n",
+ active_count );
+ if ( check_level >= 2 )
+ double_check();
+ if ( check_level >= 3 )
+ {
+ triple_check();
+ for ( p = active_list; p != (bigint) 0; p = pn )
+ {
+ pn = p->next;
+ free( p->comps );
+ free( p );
+ }
+ }
+ for ( p = free_list; p != (bigint) 0; p = pn )
+ {
+ pn = p->next;
+ free( p->comps );
+ free( p );
+ }
+ }
+
+
+void
+bi_no_check( void )
+ {
+ check_level = 0;
+ }
+
+
+bigint
+bi_copy( bigint obi )
+ {
+ real_bigint bi = (real_bigint) obi;
+
+ check( bi );
+ if ( bi->refs != PERMANENT )
+ ++bi->refs;
+ return bi;
+ }
+
+
+void
+bi_permanent( bigint obi )
+ {
+ real_bigint bi = (real_bigint) obi;
+
+ check( bi );
+ if ( check_level >= 1 && bi->refs != 1 )
+ {
+ (void) fprintf( stderr, "bi_permanent: refs was not 1\n" );
+ (void) kill( getpid(), SIGFPE );
+ }
+ bi->refs = PERMANENT;
+ }
+
+
+void
+bi_depermanent( bigint obi )
+ {
+ real_bigint bi = (real_bigint) obi;
+
+ check( bi );
+ if ( check_level >= 1 && bi->refs != PERMANENT )
+ {
+ (void) fprintf( stderr, "bi_depermanent: bigint was not permanent\n" );
+ (void) kill( getpid(), SIGFPE );
+ }
+ bi->refs = 1;
+ }
+
+
/* Drop one reference to obi.  When the count reaches zero the bigint is
** unlinked from the active list (when one is kept) and pushed onto the
** free list so alloc() can recycle it.  Permanent bigints are never
** freed.  A negative active_count means a double free and raises SIGFPE.
*/
void
bi_free( bigint obi )
    {
    real_bigint bi = (real_bigint) obi;

    check( bi );
    if ( bi->refs == PERMANENT )
	return;
    --bi->refs;
    if ( bi->refs > 0 )
	return;
    if ( check_level >= 3 )
	{
	/* The active list only gets maintained at check levels 3 or higher. */
	real_bigint* nextP;
	for ( nextP = &active_list; *nextP != (real_bigint) 0; nextP = &((*nextP)->next) )
	    if ( *nextP == bi )
		{
		*nextP = bi->next;
		break;
		}
	}
    --active_count;
    /* Recycle the carcass rather than calling free(). */
    bi->next = free_list;
    free_list = bi;
    ++free_count;
    if ( check_level >= 1 && active_count < 0 )
	{
	(void) fprintf( stderr,
	    "bi_free: active_count went negative - double-freed bigint?\n" );
	(void) kill( getpid(), SIGFPE );
	}
    }
+
+
/* Three-way compare: returns 1, 0, or -1 as obia is greater than, equal
** to, or less than obib.  Consumes both arguments.  Relies on both
** operands being in normal form (no leading zero components, sign is
** +1/-1, zero is +0), so sign and component-count comparisons are valid
** shortcuts before the per-component scan.
*/
int
bi_compare( bigint obia, bigint obib )
    {
    real_bigint bia = (real_bigint) obia;
    real_bigint bib = (real_bigint) obib;
    int r, c;

    check( bia );
    check( bib );

    /* First check for pointer equality. */
    if ( bia == bib )
	r = 0;
    else
	{
	/* Compare signs. */
	if ( bia->sign > bib->sign )
	    r = 1;
	else if ( bia->sign < bib->sign )
	    r = -1;
	/* Signs are the same. Check the number of components. */
	else if ( bia->num_comps > bib->num_comps )
	    r = bia->sign;
	else if ( bia->num_comps < bib->num_comps )
	    r = -bia->sign;
	else
	    {
	    /* Same number of components.  Compare starting from the high end
	    ** and working down.
	    */
	    r = 0;	/* if we complete the loop, the numbers are equal */
	    for ( c = bia->num_comps - 1; c >= 0; --c )
		{
		if ( bia->comps[c] > bib->comps[c] )
		    { r = bia->sign; break; }
		else if ( bia->comps[c] < bib->comps[c] )
		    { r = -bia->sign; break; }
		}
	    }
	}

    bi_free( bia );
    bi_free( bib );
    return r;
    }
+
+
+bigint
+int_to_bi( int i )
+ {
+ real_bigint biR;
+
+ biR = alloc( 1 );
+ biR->sign = 1;
+ biR->comps[0] = i;
+ normalize( biR );
+ check( biR );
+ return biR;
+ }
+
+
+int
+bi_to_int( bigint obi )
+ {
+ real_bigint bi = (real_bigint) obi;
+ comp v, m;
+ int c, r;
+
+ check( bi );
+ if ( bi_compare( bi_copy( bi ), bi_maxint ) > 0 ||
+ bi_compare( bi_copy( bi ), bi_minint ) < 0 )
+ {
+ (void) fprintf( stderr, "bi_to_int: overflow\n" );
+ (void) kill( getpid(), SIGFPE );
+ }
+ v = 0;
+ m = 1;
+ for ( c = 0; c < bi->num_comps; ++c )
+ {
+ v += bi->comps[c] * m;
+ m *= bi_radix;
+ }
+ r = (int) ( bi->sign * v );
+ bi_free( bi );
+ return r;
+ }
+
+
+bigint
+bi_int_add( bigint obi, int i )
+ {
+ real_bigint bi = (real_bigint) obi;
+ real_bigint biR;
+
+ check( bi );
+ biR = clone( bi );
+ if ( biR->sign == 1 )
+ biR->comps[0] += i;
+ else
+ biR->comps[0] -= i;
+ normalize( biR );
+ check( biR );
+ return biR;
+ }
+
+
+bigint
+bi_int_subtract( bigint obi, int i )
+ {
+ real_bigint bi = (real_bigint) obi;
+ real_bigint biR;
+
+ check( bi );
+ biR = clone( bi );
+ if ( biR->sign == 1 )
+ biR->comps[0] -= i;
+ else
+ biR->comps[0] += i;
+ normalize( biR );
+ check( biR );
+ return biR;
+ }
+
+
+bigint
+bi_int_multiply( bigint obi, int i )
+ {
+ real_bigint bi = (real_bigint) obi;
+ real_bigint biR;
+ int c;
+
+ check( bi );
+ biR = clone( bi );
+ if ( i < 0 )
+ {
+ i = -i;
+ biR->sign = -biR->sign;
+ }
+ for ( c = 0; c < biR->num_comps; ++c )
+ biR->comps[c] *= i;
+ normalize( biR );
+ check( biR );
+ return biR;
+ }
+
+
+bigint
+bi_int_divide( bigint obinumer, int denom )
+ {
+ real_bigint binumer = (real_bigint) obinumer;
+ real_bigint biR;
+ int c;
+ comp r;
+
+ check( binumer );
+ if ( denom == 0 )
+ {
+ (void) fprintf( stderr, "bi_int_divide: divide by zero\n" );
+ (void) kill( getpid(), SIGFPE );
+ }
+ biR = clone( binumer );
+ if ( denom < 0 )
+ {
+ denom = -denom;
+ biR->sign = -biR->sign;
+ }
+ r = 0;
+ for ( c = biR->num_comps - 1; c >= 0; --c )
+ {
+ r = r * bi_radix + biR->comps[c];
+ biR->comps[c] = r / denom;
+ r = r % denom;
+ }
+ normalize( biR );
+ check( biR );
+ return biR;
+ }
+
+
+int
+bi_int_rem( bigint obi, int m )
+ {
+ real_bigint bi = (real_bigint) obi;
+ comp rad_r, r;
+ int c;
+
+ check( bi );
+ if ( m == 0 )
+ {
+ (void) fprintf( stderr, "bi_int_rem: divide by zero\n" );
+ (void) kill( getpid(), SIGFPE );
+ }
+ if ( m < 0 )
+ m = -m;
+ rad_r = 1;
+ r = 0;
+ for ( c = 0; c < bi->num_comps; ++c )
+ {
+ r = ( r + bi->comps[c] * rad_r ) % m;
+ rad_r = ( rad_r * bi_radix ) % m;
+ }
+ if ( bi->sign < 1 )
+ r = -r;
+ bi_free( bi );
+ return (int) r;
+ }
+
+
+bigint
+bi_add( bigint obia, bigint obib )
+ {
+ real_bigint bia = (real_bigint) obia;
+ real_bigint bib = (real_bigint) obib;
+ real_bigint biR;
+ int c;
+
+ check( bia );
+ check( bib );
+ biR = clone( bia );
+ more_comps( biR, max( biR->num_comps, bib->num_comps ) );
+ for ( c = 0; c < bib->num_comps; ++c )
+ if ( biR->sign == bib->sign )
+ biR->comps[c] += bib->comps[c];
+ else
+ biR->comps[c] -= bib->comps[c];
+ bi_free( bib );
+ normalize( biR );
+ check( biR );
+ return biR;
+ }
+
+
+bigint
+bi_subtract( bigint obia, bigint obib )
+ {
+ real_bigint bia = (real_bigint) obia;
+ real_bigint bib = (real_bigint) obib;
+ real_bigint biR;
+ int c;
+
+ check( bia );
+ check( bib );
+ biR = clone( bia );
+ more_comps( biR, max( biR->num_comps, bib->num_comps ) );
+ for ( c = 0; c < bib->num_comps; ++c )
+ if ( biR->sign == bib->sign )
+ biR->comps[c] -= bib->comps[c];
+ else
+ biR->comps[c] += bib->comps[c];
+ bi_free( bib );
+ normalize( biR );
+ check( biR );
+ return biR;
+ }
+
+
/* Karatsuba multiplication.  This is supposedly O(n^1.59), better than
** regular multiplication for large n.  The define below sets the crossover
** point - below that we use regular multiplication, above it we
** use Karatsuba.  Note that Karatsuba is a recursive algorithm, so
** all Karatsuba calls involve regular multiplications as the base
** steps.
*/
#define KARATSUBA_THRESH 12
/* Multiply two bigints.  Consumes both arguments and returns a new
** bigint.
*/
bigint
bi_multiply( bigint obia, bigint obib )
    {
    real_bigint bia = (real_bigint) obia;
    real_bigint bib = (real_bigint) obib;

    check( bia );
    check( bib );
    if ( min( bia->num_comps, bib->num_comps ) < KARATSUBA_THRESH )
	return regular_multiply( bia, bib );
    else
	{
	/* The factors are large enough that Karatsuba multiplication
	** is a win.  The basic idea here is you break each factor up
	** into two parts, like so:
	**     i * r^n + j        k * r^n + l
	** r is the radix we're representing numbers with, so this
	** breaking up just means shuffling components around, no
	** math required.  With regular multiplication the product
	** would be:
	**     ik * r^(n*2) + ( il + jk ) * r^n + jl
	** That's four sub-multiplies and one addition, not counting the
	** radix-shifting.  With Karatsuba, you instead do:
	**     ik * r^(n*2) + ( (i+j)(k+l) - ik - jl ) * r^n + jl
	** This is only three sub-multiplies.  The number of adds
	** (and subtracts) increases to four, but those run in linear time
	** so they are cheap.  The sub-multiplies are accomplished by
	** recursive calls, eventually reducing to regular multiplication.
	*/
	int n, c;
	real_bigint bi_i, bi_j, bi_k, bi_l;
	real_bigint bi_ik, bi_mid, bi_jl;

	/* n is the split point in components: j/l get the low n
	** components, i/k the rest (zero-padded to n).
	*/
	n = ( max( bia->num_comps, bib->num_comps ) + 1 ) / 2;
	bi_i = alloc( n );
	bi_j = alloc( n );
	bi_k = alloc( n );
	bi_l = alloc( n );
	for ( c = 0; c < n; ++c )
	    {
	    if ( c + n < bia->num_comps )
		bi_i->comps[c] = bia->comps[c + n];
	    else
		bi_i->comps[c] = 0;
	    if ( c < bia->num_comps )
		bi_j->comps[c] = bia->comps[c];
	    else
		bi_j->comps[c] = 0;
	    if ( c + n < bib->num_comps )
		bi_k->comps[c] = bib->comps[c + n];
	    else
		bi_k->comps[c] = 0;
	    if ( c < bib->num_comps )
		bi_l->comps[c] = bib->comps[c];
	    else
		bi_l->comps[c] = 0;
	    }
	/* The halves are treated as non-negative; the product's sign is
	** reattached at the end.
	*/
	bi_i->sign = bi_j->sign = bi_k->sign = bi_l->sign = 1;
	normalize( bi_i );
	normalize( bi_j );
	normalize( bi_k );
	normalize( bi_l );
	bi_ik = bi_multiply( bi_copy( bi_i ), bi_copy( bi_k ) );
	bi_jl = bi_multiply( bi_copy( bi_j ), bi_copy( bi_l ) );
	bi_mid = bi_subtract(
	    bi_subtract(
		bi_multiply( bi_add( bi_i, bi_j ), bi_add( bi_k, bi_l ) ),
		bi_copy( bi_ik ) ),
	    bi_copy( bi_jl ) );
	/* Recombine in place into bi_jl: add bi_mid shifted by n
	** components and bi_ik shifted by 2n components.
	*/
	more_comps(
	    bi_jl, max( bi_mid->num_comps + n, bi_ik->num_comps + n * 2 ) );
	for ( c = 0; c < bi_mid->num_comps; ++c )
	    bi_jl->comps[c + n] += bi_mid->comps[c];
	for ( c = 0; c < bi_ik->num_comps; ++c )
	    bi_jl->comps[c + n * 2] += bi_ik->comps[c];
	bi_free( bi_ik );
	bi_free( bi_mid );
	bi_jl->sign = bia->sign * bib->sign;
	bi_free( bia );
	bi_free( bib );
	normalize( bi_jl );
	check( bi_jl );
	return bi_jl;
	}
    }
+
+
/* Regular O(n^2) multiplication.  Consumes both arguments.  Used directly
** for small factors and as the base case of the Karatsuba recursion in
** bi_multiply().
*/
static bigint
regular_multiply( real_bigint bia, real_bigint bib )
    {
    real_bigint biR;
    int new_comps, c1, c2;

    check( bia );
    check( bib );
    biR = clone( bi_0 );
    new_comps = bia->num_comps + bib->num_comps;
    more_comps( biR, new_comps );
    for ( c1 = 0; c1 < bia->num_comps; ++c1 )
	{
	for ( c2 = 0; c2 < bib->num_comps; ++c2 )
	    biR->comps[c1 + c2] += bia->comps[c1] * bib->comps[c2];
	/* Normalize after each inner loop to avoid overflowing any
	** components.  But be sure to reset biR's components count,
	** in case a previous normalization lowered it.
	*/
	biR->num_comps = new_comps;
	normalize( biR );
	}
    check( biR );
    /* Attach the product's sign, unless the product is zero (zero keeps
    ** its canonical +1 sign).
    */
    if ( ! bi_is_zero( bi_copy( biR ) ) )
	biR->sign = bia->sign * bib->sign;
    bi_free( bia );
    bi_free( bib );
    return biR;
    }
+
+
/* The following three routines implement a multi-precision divide method
** that I haven't seen used anywhere else.  It is not quite as fast as
** the standard divide method, but it is a lot simpler.  In fact it's
** about as simple as the binary shift-and-subtract method, which goes
** about five times slower than this.
**
** The method assumes you already have multi-precision multiply and subtract
** routines, and also a multi-by-single precision divide routine.  The latter
** is used to generate approximations, which are then checked and corrected
** using the former.  The result converges to the correct value by about
** 16 bits per loop.
*/

/* Public routine to divide two arbitrary numbers.  Consumes both
** arguments; the quotient truncates toward zero.  Division by zero
** raises SIGFPE.
*/
bigint
bi_divide( bigint binumer, bigint obidenom )
    {
    real_bigint bidenom = (real_bigint) obidenom;
    int sign;
    bigint biquotient;

    /* Check signs and trivial cases. */
    sign = 1;
    switch ( bi_compare( bi_copy( bidenom ), bi_0 ) )
	{
	case 0:
	(void) fprintf( stderr, "bi_divide: divide by zero\n" );
	(void) kill( getpid(), SIGFPE );
	/* NOTE(review): presumably SIGFPE's default disposition terminates
	** the process, making the fall-through into case -1 unreachable -
	** confirm no handler catches it.
	*/
	case -1:
	sign *= -1;
	bidenom = bi_negate( bidenom );
	break;
	}
    switch ( bi_compare( bi_copy( binumer ), bi_0 ) )
	{
	case 0:
	/* 0 / x == 0. */
	bi_free( binumer );
	bi_free( bidenom );
	return bi_0;
	case -1:
	sign *= -1;
	binumer = bi_negate( binumer );
	break;
	}
    switch ( bi_compare( bi_copy( binumer ), bi_copy( bidenom ) ) )
	{
	case -1:
	/* |numer| < |denom|: quotient truncates to zero. */
	bi_free( binumer );
	bi_free( bidenom );
	return bi_0;
	case 0:
	/* |numer| == |denom|: quotient is +-1. */
	bi_free( binumer );
	bi_free( bidenom );
	if ( sign == 1 )
	    return bi_1;
	else
	    return bi_m1;
	}

    /* Is the denominator small enough to do an int divide? */
    if ( bidenom->num_comps == 1 )
	{
	/* Win! */
	biquotient = bi_int_divide( binumer, bidenom->comps[0] );
	bi_free( bidenom );
	}
    else
	{
	/* No, we have to do a full multi-by-multi divide. */
	biquotient = multi_divide( binumer, bidenom );
	}

    if ( sign == -1 )
	biquotient = bi_negate( biquotient );
    return biquotient;
    }
+
+
/* Divide two multi-precision positive numbers.  Consumes both arguments.
** Conditions the operands so the approximation scheme in multi_divide2()
** converges, then starts the recursion.
*/
static bigint
multi_divide( bigint binumer, real_bigint bidenom )
    {
    /* We use a successive approximation method that is kind of like a
    ** continued fraction.  The basic approximation is to do an int divide
    ** by the high-order component of the denominator.  Then we correct
    ** based on the remainder from that.
    **
    ** However, if the high-order component is too small, this doesn't
    ** work well.  In particular, if the high-order component is 1 it
    ** doesn't work at all.  Easily fixed, though - if the component
    ** is too small, increase it!
    */
    if ( bidenom->comps[bidenom->num_comps-1] < bi_radix_sqrt )
	{
	/* We use the square root of the radix as the threshhold here
	** because that's the largest value guaranteed to not make the
	** high-order component overflow and become too small again.
	**
	** We increase binumer along with bidenom to keep the end result
	** the same.
	*/
	binumer = bi_int_multiply( binumer, bi_radix_sqrt );
	bidenom = bi_int_multiply( bidenom, bi_radix_sqrt );
	}

    /* Now start the recursion. */
    return multi_divide2( binumer, bidenom );
    }
+
+
/* Divide two multi-precision positive conditioned numbers.  Consumes both
** arguments.  Produces an over-estimate of the quotient by dividing by
** only the denominator's top component, then corrects it recursively
** using the remainder.
*/
static bigint
multi_divide2( bigint binumer, real_bigint bidenom )
    {
    real_bigint biapprox;
    bigint birem, biquotient;
    int c, o;

    /* Figure out the approximate quotient.  Since we're dividing by only
    ** the top component of the denominator, which is less than or equal to
    ** the full denominator, the result is guaranteed to be greater than or
    ** equal to the correct quotient.
    */
    o = bidenom->num_comps - 1;
    biapprox = bi_int_divide( bi_copy( binumer ), bidenom->comps[o] );
    /* And downshift the result to get the approximate quotient. */
    for ( c = o; c < biapprox->num_comps; ++c )
	biapprox->comps[c - o] = biapprox->comps[c];
    biapprox->num_comps -= o;

    /* Find the remainder from the approximate quotient.
    ** (Computed as approx*denom - numer, so it is >= 0.)
    */
    birem = bi_subtract(
	bi_multiply( bi_copy( biapprox ), bi_copy( bidenom ) ), binumer );

    /* If the remainder is negative, zero, or in fact any value less
    ** than bidenom, then we have the correct quotient and we're done.
    */
    if ( bi_compare( bi_copy( birem ), bi_copy( bidenom ) ) < 0 )
	{
	biquotient = biapprox;
	bi_free( birem );
	bi_free( bidenom );
	}
    else
	{
	/* The real quotient is now biapprox - birem / bidenom.  We still
	** have to do a divide.  However, birem is smaller than binumer,
	** so the next divide will go faster.  We do the divide by
	** recursion.  Since this is tail-recursion or close to it, we
	** could probably re-arrange things and make it a non-recursive
	** loop, but the overhead of recursion is small and the bookkeeping
	** is simpler this way.
	**
	** Note that since the sub-divide uses the same denominator, it
	** doesn't have to adjust the values again - the high-order component
	** will still be good.
	*/
	biquotient = bi_subtract( biapprox, multi_divide2( birem, bidenom ) );
	}

    return biquotient;
    }
+
+
+/* Binary division - about five times slower than the above. */
+bigint
+bi_binary_divide( bigint binumer, bigint obidenom )
+ {
+ real_bigint bidenom = (real_bigint) obidenom;
+ int sign;
+ bigint biquotient;
+
+ /* Check signs and trivial cases. */
+ sign = 1;
+ switch ( bi_compare( bi_copy( bidenom ), bi_0 ) )
+ {
+ case 0:
+ (void) fprintf( stderr, "bi_divide: divide by zero\n" );
+ (void) kill( getpid(), SIGFPE );
+ case -1:
+ sign *= -1;
+ bidenom = bi_negate( bidenom );
+ break;
+ }
+ switch ( bi_compare( bi_copy( binumer ), bi_0 ) )
+ {
+ case 0:
+ bi_free( binumer );
+ bi_free( bidenom );
+ return bi_0;
+ case -1:
+ sign *= -1;
+ binumer = bi_negate( binumer );
+ break;
+ }
+ switch ( bi_compare( bi_copy( binumer ), bi_copy( bidenom ) ) )
+ {
+ case -1:
+ bi_free( binumer );
+ bi_free( bidenom );
+ return bi_0;
+ case 0:
+ bi_free( binumer );
+ bi_free( bidenom );
+ if ( sign == 1 )
+ return bi_1;
+ else
+ return bi_m1;
+ }
+
+ /* Is the denominator small enough to do an int divide? */
+ if ( bidenom->num_comps == 1 )
+ {
+ /* Win! */
+ biquotient = bi_int_divide( binumer, bidenom->comps[0] );
+ bi_free( bidenom );
+ }
+ else
+ {
+ /* No, we have to do a full multi-by-multi divide. */
+ int num_bits, den_bits, i;
+
+ num_bits = bi_bits( bi_copy( binumer ) );
+ den_bits = bi_bits( bi_copy( bidenom ) );
+ bidenom = bi_multiply( bidenom, bi_power( bi_2, int_to_bi( num_bits - den_bits ) ) );
+ biquotient = bi_0;
+ for ( i = den_bits; i <= num_bits; ++i )
+ {
+ biquotient = bi_double( biquotient );
+ if ( bi_compare( bi_copy( binumer ), bi_copy( bidenom ) ) >= 0 )
+ {
+ biquotient = bi_int_add( biquotient, 1 );
+ binumer = bi_subtract( binumer, bi_copy( bidenom ) );
+ }
+ bidenom = bi_half( bidenom );
+ }
+ bi_free( binumer );
+ bi_free( bidenom );
+ }
+
+ if ( sign == -1 )
+ biquotient = bi_negate( biquotient );
+ return biquotient;
+ }
+
+
+bigint
+bi_negate( bigint obi )
+ {
+ real_bigint bi = (real_bigint) obi;
+ real_bigint biR;
+
+ check( bi );
+ biR = clone( bi );
+ biR->sign = -biR->sign;
+ check( biR );
+ return biR;
+ }
+
+
+bigint
+bi_abs( bigint obi )
+ {
+ real_bigint bi = (real_bigint) obi;
+ real_bigint biR;
+
+ check( bi );
+ biR = clone( bi );
+ biR->sign = 1;
+ check( biR );
+ return biR;
+ }
+
+
+bigint
+bi_half( bigint obi )
+ {
+ real_bigint bi = (real_bigint) obi;
+ real_bigint biR;
+ int c;
+
+ check( bi );
+ /* This depends on the radix being even. */
+ biR = clone( bi );
+ for ( c = 0; c < biR->num_comps; ++c )
+ {
+ if ( biR->comps[c] & 1 )
+ if ( c > 0 )
+ biR->comps[c - 1] += bi_radix_o2;
+ biR->comps[c] = biR->comps[c] >> 1;
+ }
+ /* Avoid normalization. */
+ if ( biR->num_comps > 1 && biR->comps[biR->num_comps-1] == 0 )
+ --biR->num_comps;
+ check( biR );
+ return biR;
+ }
+
+
+bigint
+bi_double( bigint obi )
+ {
+ real_bigint bi = (real_bigint) obi;
+ real_bigint biR;
+ int c;
+
+ check( bi );
+ biR = clone( bi );
+ for ( c = biR->num_comps - 1; c >= 0; --c )
+ {
+ biR->comps[c] = biR->comps[c] << 1;
+ if ( biR->comps[c] >= bi_radix )
+ {
+ if ( c + 1 >= biR->num_comps )
+ more_comps( biR, biR->num_comps + 1 );
+ biR->comps[c] -= bi_radix;
+ biR->comps[c + 1] += 1;
+ }
+ }
+ check( biR );
+ return biR;
+ }
+
+
/* Find integer square root by Newton's method.  Consumes obi and returns
** floor(sqrt(obi)).  A negative argument raises SIGFPE.
*/
bigint
bi_sqrt( bigint obi )
    {
    real_bigint bi = (real_bigint) obi;
    bigint biR, biR2, bidiff;

    switch ( bi_compare( bi_copy( bi ), bi_0 ) )
	{
	case -1:
	(void) fprintf( stderr, "bi_sqrt: imaginary result\n" );
	(void) kill( getpid(), SIGFPE );
	/* NOTE(review): presumably SIGFPE's default disposition terminates
	** the process, making the fall-through into case 0 unreachable.
	*/
	case 0:
	/* sqrt(0) == 0. */
	return bi;
	}
    if ( bi_is_one( bi_copy( bi ) ) )
	return bi;	/* sqrt(1) == 1 */

    /* Newton's method converges reasonably fast, but it helps to have
    ** a good initial guess.  We can make a *very* good initial guess
    ** by taking the square root of the top component times the square
    ** root of the radix part.  Both of those are easy to compute.
    */
    biR = bi_int_multiply(
	bi_power( int_to_bi( bi_radix_sqrt ), int_to_bi( bi->num_comps - 1 ) ),
	csqrt( bi->comps[bi->num_comps - 1] ) );

    /* Now do the Newton loop until we have the answer.  The iterate
    ** oscillates around the true root, so a difference of 0, -1, or +1
    ** between successive estimates identifies the floor.
    */
    for (;;)
	{
	biR2 = bi_divide( bi_copy( bi ), bi_copy( biR ) );
	bidiff = bi_subtract( bi_copy( biR ), bi_copy( biR2 ) );
	if ( bi_is_zero( bi_copy( bidiff ) ) ||
	     bi_compare( bi_copy( bidiff ), bi_m1 ) == 0 )
	    {
	    bi_free( bi );
	    bi_free( bidiff );
	    bi_free( biR2 );
	    return biR;
	    }
	if ( bi_is_one( bi_copy( bidiff ) ) )
	    {
	    bi_free( bi );
	    bi_free( bidiff );
	    bi_free( biR );
	    return biR2;
	    }
	bi_free( bidiff );
	/* Next iterate: the average of the two current estimates. */
	biR = bi_half( bi_add( biR, biR2 ) );
	}
    }
+
+
+int
+bi_is_odd( bigint obi )
+ {
+ real_bigint bi = (real_bigint) obi;
+ int r;
+
+ check( bi );
+ r = bi->comps[0] & 1;
+ bi_free( bi );
+ return r;
+ }
+
+
+int
+bi_is_zero( bigint obi )
+ {
+ real_bigint bi = (real_bigint) obi;
+ int r;
+
+ check( bi );
+ r = ( bi->sign == 1 && bi->num_comps == 1 && bi->comps[0] == 0 );
+ bi_free( bi );
+ return r;
+ }
+
+
+int
+bi_is_one( bigint obi )
+ {
+ real_bigint bi = (real_bigint) obi;
+ int r;
+
+ check( bi );
+ r = ( bi->sign == 1 && bi->num_comps == 1 && bi->comps[0] == 1 );
+ bi_free( bi );
+ return r;
+ }
+
+
+int
+bi_is_negative( bigint obi )
+ {
+ real_bigint bi = (real_bigint) obi;
+ int r;
+
+ check( bi );
+ r = ( bi->sign == -1 );
+ bi_free( bi );
+ return r;
+ }
+
+
/* Return a pseudo-random bigint in [0, bi).  Consumes one reference to
** bi.
**
** NOTE(review): the components are filled straight from random() without
** reduction modulo the radix - normalize() carries the excess - and the
** final bi_mod presumably folds the oversized value into range.  The
** distribution is only approximately uniform and random() is never
** seeded here; not suitable for cryptographic use.
*/
bigint
bi_random( bigint bi )
    {
    real_bigint biR;
    int c;

    /* Squaring bi yields a value with enough components to cover the
    ** full range before the final modulo.
    */
    biR = bi_multiply( bi_copy( bi ), bi_copy( bi ) );
    for ( c = 0; c < biR->num_comps; ++c )
	biR->comps[c] = random();
    normalize( biR );
    biR = bi_mod( biR, bi );
    return biR;
    }
+
+
+int
+bi_bits( bigint obi )
+ {
+ real_bigint bi = (real_bigint) obi;
+ int bits;
+
+ bits =
+ bi_comp_bits * ( bi->num_comps - 1 ) +
+ cbits( bi->comps[bi->num_comps - 1] );
+ bi_free( bi );
+ return bits;
+ }
+
+
/* Allocate and zero more components.  Does not consume bi, of course.
** Grows the buffer geometrically (at least doubling) so repeated growth
** is amortized linear.  On allocation failure it prints a message and
** exits, so the realloc-over-the-same-pointer pattern cannot leak here.
*/
static void
more_comps( real_bigint bi, int n )
    {
    if ( n > bi->max_comps )
	{
	bi->max_comps = max( bi->max_comps * 2, n );
	bi->comps = (comp*) realloc(
	    (void*) bi->comps, bi->max_comps * sizeof(comp) );
	if ( bi->comps == (comp*) 0 )
	    {
	    (void) fprintf( stderr, "out of memory\n" );
	    exit( 1 );
	    }
	}
    /* Zero only the newly exposed components. */
    for ( ; bi->num_comps < n; ++bi->num_comps )
	bi->comps[bi->num_comps] = 0;
    }
+
+
/* Make a new empty bigint.  Fills in everything except sign and the
** components.  Recycles a bigint from the free list when possible;
** otherwise mallocs a fresh one.  Out-of-memory prints a message and
** exits.  At check level 3+ the new bigint is linked onto the active
** list so check() can verify liveness.
*/
static real_bigint
alloc( int num_comps )
    {
    real_bigint biR;

    /* Can we recycle an old bigint? */
    if ( free_list != (real_bigint) 0 )
	{
	biR = free_list;
	free_list = biR->next;
	--free_count;
	if ( check_level >= 1 && biR->refs != 0 )
	    {
	    (void) fprintf( stderr, "alloc: refs was not 0\n" );
	    (void) kill( getpid(), SIGFPE );
	    }
	/* Grow the recycled component buffer if it's too small. */
	more_comps( biR, num_comps );
	}
    else
	{
	/* No free bigints available - create a new one. */
	biR = (real_bigint) malloc( sizeof(struct _real_bigint) );
	if ( biR == (real_bigint) 0 )
	    {
	    (void) fprintf( stderr, "out of memory\n" );
	    exit( 1 );
	    }
	biR->comps = (comp*) malloc( num_comps * sizeof(comp) );
	if ( biR->comps == (comp*) 0 )
	    {
	    (void) fprintf( stderr, "out of memory\n" );
	    exit( 1 );
	    }
	biR->max_comps = num_comps;
	}
    biR->num_comps = num_comps;
    biR->refs = 1;
    if ( check_level >= 3 )
	{
	/* The active list only gets maintained at check levels 3 or higher. */
	biR->next = active_list;
	active_list = biR;
	}
    else
	biR->next = (real_bigint) 0;
    ++active_count;
    return biR;
    }
+
+
+/* Make a modifiable copy of bi. DOES consume bi. */
+static real_bigint
+clone( real_bigint bi )
+ {
+ real_bigint biR;
+ int c;
+
+ /* Very clever optimization. */
+ if ( bi->refs != PERMANENT && bi->refs == 1 )
+ return bi;
+
+ biR = alloc( bi->num_comps );
+ biR->sign = bi->sign;
+ for ( c = 0; c < bi->num_comps; ++c )
+ biR->comps[c] = bi->comps[c];
+ bi_free( bi );
+ return biR;
+ }
+
+
/* Put bi into normal form.  Does not consume bi, of course.
**
** Normal form is:
**  - All components >= 0 and < bi_radix.
**  - Leading 0 components removed.
**  - Sign either 1 or -1.
**  - The number zero represented by a single 0 component and a sign of 1.
*/
static void
normalize( real_bigint bi )
    {
    int c;

    /* Borrow for negative components.  Got to be careful with the math here:
    **   -9 / 10 == 0    -9 % 10 == -9
    **  -10 / 10 == -1  -10 % 10 == 0
    **  -11 / 10 == -1  -11 % 10 == -1
    ** (i.e. C truncates division toward zero, so the quotient/remainder
    ** pair needs the adjustments below to act as a proper borrow.)
    */
    for ( c = 0; c < bi->num_comps - 1; ++c )
	if ( bi->comps[c] < 0 )
	    {
	    bi->comps[c+1] += bi->comps[c] / bi_radix - 1;
	    bi->comps[c] = bi->comps[c] % bi_radix;
	    if ( bi->comps[c] != 0 )
		bi->comps[c] += bi_radix;
	    else
		bi->comps[c+1] += 1;
	    }
    /* Is the top component negative? */
    if ( bi->comps[bi->num_comps - 1] < 0 )
	{
	/* Switch the sign of the number, and fix up the components. */
	bi->sign = -bi->sign;
	for ( c = 0; c < bi->num_comps - 1; ++c )
	    {
	    bi->comps[c] =  bi_radix - bi->comps[c];
	    bi->comps[c + 1] += 1;
	    }
	bi->comps[bi->num_comps - 1] = -bi->comps[bi->num_comps - 1];
	}

    /* Carry for components larger than the radix. */
    for ( c = 0; c < bi->num_comps; ++c )
	if ( bi->comps[c] >= bi_radix )
	    {
	    if ( c + 1 >= bi->num_comps )
		more_comps( bi, bi->num_comps + 1 );
	    bi->comps[c+1] += bi->comps[c] / bi_radix;
	    bi->comps[c] = bi->comps[c] % bi_radix;
	    }

    /* Trim off any leading zero components. */
    for ( ; bi->num_comps > 1 && bi->comps[bi->num_comps-1] == 0; --bi->num_comps )
	;

    /* Check for -0. */
    if ( bi->num_comps == 1 && bi->comps[0] == 0 && bi->sign == -1 )
	bi->sign = 1;
    }
+
+
/* Sanity-check a bigint before use.  A no-op at check level 0.  Verifies
** the refcount is positive, that the bigint is not on the free list
** (levels < 3: an active bigint must have a null next pointer; levels
** >= 3: it must appear on the active list), and at higher levels runs
** the full list audits.  Any violation raises SIGFPE.
*/
static void
check( real_bigint bi )
    {
    if ( check_level == 0 )
	return;
    if ( bi->refs == 0 )
	{
	(void) fprintf( stderr, "check: zero refs in bigint\n" );
	(void) kill( getpid(), SIGFPE );
	}
    if ( bi->refs < 0 )
	{
	(void) fprintf( stderr, "check: negative refs in bigint\n" );
	(void) kill( getpid(), SIGFPE );
	}
    if ( check_level < 3 )
	{
	/* At check levels less than 3, active bigints have a zero next. */
	if ( bi->next != (real_bigint) 0 )
	    {
	    (void) fprintf(
		stderr, "check: attempt to use a bigint from the free list\n" );
	    (void) kill( getpid(), SIGFPE );
	    }
	}
    else
	{
	/* At check levels 3 or higher, active bigints must be on the active
	** list.
	*/
	real_bigint p;

	for ( p = active_list; p != (real_bigint) 0; p = p->next )
	    if ( p == bi )
		break;
	if ( p == (real_bigint) 0 )
	    {
	    (void) fprintf( stderr,
		"check: attempt to use a bigint not on the active list\n" );
	    (void) kill( getpid(), SIGFPE );
	    }
	}
    if ( check_level >= 2 )
	double_check();
    if ( check_level >= 3 )
	triple_check();
    }
+
+
+static void
+double_check( void )
+ {
+ real_bigint p;
+ int c;
+
+ for ( p = free_list, c = 0; p != (real_bigint) 0; p = p->next, ++c )
+ if ( p->refs != 0 )
+ {
+ (void) fprintf( stderr,
+ "double_check: found a non-zero ref on the free list\n" );
+ (void) kill( getpid(), SIGFPE );
+ }
+ if ( c != free_count )
+ {
+ (void) fprintf( stderr,
+ "double_check: free_count is %d but the free list has %d items\n",
+ free_count, c );
+ (void) kill( getpid(), SIGFPE );
+ }
+ }
+
+
+static void
+triple_check( void )
+ {
+ real_bigint p;
+ int c;
+
+ for ( p = active_list, c = 0; p != (real_bigint) 0; p = p->next, ++c )
+ if ( p->refs == 0 )
+ {
+ (void) fprintf( stderr,
+ "triple_check: found a zero ref on the active list\n" );
+ (void) kill( getpid(), SIGFPE );
+ }
+ if ( c != active_count )
+ {
+ (void) fprintf( stderr,
+ "triple_check: active_count is %d but active_list has %d items\n",
+ free_count, c );
+ (void) kill( getpid(), SIGFPE );
+ }
+ }
+
+
#ifdef DUMP
/* Debug routine to dump out a complete bigint.  Does not consume bi. */
static void
dump( char* str, bigint obi )
    {
    int c;
    real_bigint bi = (real_bigint) obi;

    /* Use %p for pointers - the old (unsigned int)/%08x casts truncated
    ** addresses on LP64 platforms.
    */
    (void) fprintf( stdout, "dump %s at %p:\n", str, (void*) bi );
    (void) fprintf( stdout, "    refs: %d\n", bi->refs );
    (void) fprintf( stdout, "    next: %p\n", (void*) bi->next );
    (void) fprintf( stdout, "    num_comps: %d\n", bi->num_comps );
    (void) fprintf( stdout, "    max_comps: %d\n", bi->max_comps );
    (void) fprintf( stdout, "    sign: %d\n", bi->sign );
    for ( c = bi->num_comps - 1; c >= 0; --c )
	(void) fprintf( stdout, "    comps[%d]: %11lld (0x%016llx)\n", c, (long long) bi->comps[c], (long long) bi->comps[c] );
    (void) fprintf( stdout, "    print: " );
    bi_print( stdout, bi_copy( bi ) );
    (void) fprintf( stdout, "\n" );
    }
#endif /* DUMP */
+
+
+/* Trivial square-root routine so that we don't have to link in the math lib. */
+static int
+csqrt( comp c )
+ {
+ comp r, r2, diff;
+
+ if ( c < 0 )
+ {
+ (void) fprintf( stderr, "csqrt: imaginary result\n" );
+ (void) kill( getpid(), SIGFPE );
+ }
+
+ r = c / 2;
+ for (;;)
+ {
+ r2 = c / r;
+ diff = r - r2;
+ if ( diff == 0 || diff == -1 )
+ return (int) r;
+ if ( diff == 1 )
+ return (int) r2;
+ r = ( r + r2 ) / 2;
+ }
+ }
+
+
+/* Figure out how many bits are in a number. */
+static int
+cbits( comp c )
+ {
+ int b;
+
+ for ( b = 0; c != 0; ++b )
+ c >>= 1;
+ return b;
+ }
diff --git a/src/rt/bigint/low_primes.h b/src/rt/bigint/low_primes.h
new file mode 100644
index 00000000..c9d3df0b
--- /dev/null
+++ b/src/rt/bigint/low_primes.h
@@ -0,0 +1,1069 @@
+/* Primes up to 100000. */
+static long low_primes[] = {
+ 2, 3, 5, 7, 11, 13, 17, 19, 23,
+ 29, 31, 37, 41, 43, 47, 53, 59, 61,
+ 67, 71, 73, 79, 83, 89, 97, 101, 103,
+ 107, 109, 113, 127, 131, 137, 139, 149, 151,
+ 157, 163, 167, 173, 179, 181, 191, 193, 197,
+ 199, 211, 223, 227, 229, 233, 239, 241, 251,
+ 257, 263, 269, 271, 277, 281, 283, 293, 307,
+ 311, 313, 317, 331, 337, 347, 349, 353, 359,
+ 367, 373, 379, 383, 389, 397, 401, 409, 419,
+ 421, 431, 433, 439, 443, 449, 457, 461, 463,
+ 467, 479, 487, 491, 499, 503, 509, 521, 523,
+ 541, 547, 557, 563, 569, 571, 577, 587, 593,
+ 599, 601, 607, 613, 617, 619, 631, 641, 643,
+ 647, 653, 659, 661, 673, 677, 683, 691, 701,
+ 709, 719, 727, 733, 739, 743, 751, 757, 761,
+ 769, 773, 787, 797, 809, 811, 821, 823, 827,
+ 829, 839, 853, 857, 859, 863, 877, 881, 883,
+ 887, 907, 911, 919, 929, 937, 941, 947, 953,
+ 967, 971, 977, 983, 991, 997, 1009, 1013, 1019,
+ 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069,
+ 1087, 1091, 1093, 1097, 1103, 1109, 1117, 1123, 1129,
+ 1151, 1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213,
+ 1217, 1223, 1229, 1231, 1237, 1249, 1259, 1277, 1279,
+ 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321,
+ 1327, 1361, 1367, 1373, 1381, 1399, 1409, 1423, 1427,
+ 1429, 1433, 1439, 1447, 1451, 1453, 1459, 1471, 1481,
+ 1483, 1487, 1489, 1493, 1499, 1511, 1523, 1531, 1543,
+ 1549, 1553, 1559, 1567, 1571, 1579, 1583, 1597, 1601,
+ 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657, 1663,
+ 1667, 1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733,
+ 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801,
+ 1811, 1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877,
+ 1879, 1889, 1901, 1907, 1913, 1931, 1933, 1949, 1951,
+ 1973, 1979, 1987, 1993, 1997, 1999, 2003, 2011, 2017,
+ 2027, 2029, 2039, 2053, 2063, 2069, 2081, 2083, 2087,
+ 2089, 2099, 2111, 2113, 2129, 2131, 2137, 2141, 2143,
+ 2153, 2161, 2179, 2203, 2207, 2213, 2221, 2237, 2239,
+ 2243, 2251, 2267, 2269, 2273, 2281, 2287, 2293, 2297,
+ 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357, 2371,
+ 2377, 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423,
+ 2437, 2441, 2447, 2459, 2467, 2473, 2477, 2503, 2521,
+ 2531, 2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593,
+ 2609, 2617, 2621, 2633, 2647, 2657, 2659, 2663, 2671,
+ 2677, 2683, 2687, 2689, 2693, 2699, 2707, 2711, 2713,
+ 2719, 2729, 2731, 2741, 2749, 2753, 2767, 2777, 2789,
+ 2791, 2797, 2801, 2803, 2819, 2833, 2837, 2843, 2851,
+ 2857, 2861, 2879, 2887, 2897, 2903, 2909, 2917, 2927,
+ 2939, 2953, 2957, 2963, 2969, 2971, 2999, 3001, 3011,
+ 3019, 3023, 3037, 3041, 3049, 3061, 3067, 3079, 3083,
+ 3089, 3109, 3119, 3121, 3137, 3163, 3167, 3169, 3181,
+ 3187, 3191, 3203, 3209, 3217, 3221, 3229, 3251, 3253,
+ 3257, 3259, 3271, 3299, 3301, 3307, 3313, 3319, 3323,
+ 3329, 3331, 3343, 3347, 3359, 3361, 3371, 3373, 3389,
+ 3391, 3407, 3413, 3433, 3449, 3457, 3461, 3463, 3467,
+ 3469, 3491, 3499, 3511, 3517, 3527, 3529, 3533, 3539,
+ 3541, 3547, 3557, 3559, 3571, 3581, 3583, 3593, 3607,
+ 3613, 3617, 3623, 3631, 3637, 3643, 3659, 3671, 3673,
+ 3677, 3691, 3697, 3701, 3709, 3719, 3727, 3733, 3739,
+ 3761, 3767, 3769, 3779, 3793, 3797, 3803, 3821, 3823,
+ 3833, 3847, 3851, 3853, 3863, 3877, 3881, 3889, 3907,
+ 3911, 3917, 3919, 3923, 3929, 3931, 3943, 3947, 3967,
+ 3989, 4001, 4003, 4007, 4013, 4019, 4021, 4027, 4049,
+ 4051, 4057, 4073, 4079, 4091, 4093, 4099, 4111, 4127,
+ 4129, 4133, 4139, 4153, 4157, 4159, 4177, 4201, 4211,
+ 4217, 4219, 4229, 4231, 4241, 4243, 4253, 4259, 4261,
+ 4271, 4273, 4283, 4289, 4297, 4327, 4337, 4339, 4349,
+ 4357, 4363, 4373, 4391, 4397, 4409, 4421, 4423, 4441,
+ 4447, 4451, 4457, 4463, 4481, 4483, 4493, 4507, 4513,
+ 4517, 4519, 4523, 4547, 4549, 4561, 4567, 4583, 4591,
+ 4597, 4603, 4621, 4637, 4639, 4643, 4649, 4651, 4657,
+ 4663, 4673, 4679, 4691, 4703, 4721, 4723, 4729, 4733,
+ 4751, 4759, 4783, 4787, 4789, 4793, 4799, 4801, 4813,
+ 4817, 4831, 4861, 4871, 4877, 4889, 4903, 4909, 4919,
+ 4931, 4933, 4937, 4943, 4951, 4957, 4967, 4969, 4973,
+ 4987, 4993, 4999, 5003, 5009, 5011, 5021, 5023, 5039,
+ 5051, 5059, 5077, 5081, 5087, 5099, 5101, 5107, 5113,
+ 5119, 5147, 5153, 5167, 5171, 5179, 5189, 5197, 5209,
+ 5227, 5231, 5233, 5237, 5261, 5273, 5279, 5281, 5297,
+ 5303, 5309, 5323, 5333, 5347, 5351, 5381, 5387, 5393,
+ 5399, 5407, 5413, 5417, 5419, 5431, 5437, 5441, 5443,
+ 5449, 5471, 5477, 5479, 5483, 5501, 5503, 5507, 5519,
+ 5521, 5527, 5531, 5557, 5563, 5569, 5573, 5581, 5591,
+ 5623, 5639, 5641, 5647, 5651, 5653, 5657, 5659, 5669,
+ 5683, 5689, 5693, 5701, 5711, 5717, 5737, 5741, 5743,
+ 5749, 5779, 5783, 5791, 5801, 5807, 5813, 5821, 5827,
+ 5839, 5843, 5849, 5851, 5857, 5861, 5867, 5869, 5879,
+ 5881, 5897, 5903, 5923, 5927, 5939, 5953, 5981, 5987,
+ 6007, 6011, 6029, 6037, 6043, 6047, 6053, 6067, 6073,
+ 6079, 6089, 6091, 6101, 6113, 6121, 6131, 6133, 6143,
+ 6151, 6163, 6173, 6197, 6199, 6203, 6211, 6217, 6221,
+ 6229, 6247, 6257, 6263, 6269, 6271, 6277, 6287, 6299,
+ 6301, 6311, 6317, 6323, 6329, 6337, 6343, 6353, 6359,
+ 6361, 6367, 6373, 6379, 6389, 6397, 6421, 6427, 6449,
+ 6451, 6469, 6473, 6481, 6491, 6521, 6529, 6547, 6551,
+ 6553, 6563, 6569, 6571, 6577, 6581, 6599, 6607, 6619,
+ 6637, 6653, 6659, 6661, 6673, 6679, 6689, 6691, 6701,
+ 6703, 6709, 6719, 6733, 6737, 6761, 6763, 6779, 6781,
+ 6791, 6793, 6803, 6823, 6827, 6829, 6833, 6841, 6857,
+ 6863, 6869, 6871, 6883, 6899, 6907, 6911, 6917, 6947,
+ 6949, 6959, 6961, 6967, 6971, 6977, 6983, 6991, 6997,
+ 7001, 7013, 7019, 7027, 7039, 7043, 7057, 7069, 7079,
+ 7103, 7109, 7121, 7127, 7129, 7151, 7159, 7177, 7187,
+ 7193, 7207, 7211, 7213, 7219, 7229, 7237, 7243, 7247,
+ 7253, 7283, 7297, 7307, 7309, 7321, 7331, 7333, 7349,
+ 7351, 7369, 7393, 7411, 7417, 7433, 7451, 7457, 7459,
+ 7477, 7481, 7487, 7489, 7499, 7507, 7517, 7523, 7529,
+ 7537, 7541, 7547, 7549, 7559, 7561, 7573, 7577, 7583,
+ 7589, 7591, 7603, 7607, 7621, 7639, 7643, 7649, 7669,
+ 7673, 7681, 7687, 7691, 7699, 7703, 7717, 7723, 7727,
+ 7741, 7753, 7757, 7759, 7789, 7793, 7817, 7823, 7829,
+ 7841, 7853, 7867, 7873, 7877, 7879, 7883, 7901, 7907,
+ 7919, 7927, 7933, 7937, 7949, 7951, 7963, 7993, 8009,
+ 8011, 8017, 8039, 8053, 8059, 8069, 8081, 8087, 8089,
+ 8093, 8101, 8111, 8117, 8123, 8147, 8161, 8167, 8171,
+ 8179, 8191, 8209, 8219, 8221, 8231, 8233, 8237, 8243,
+ 8263, 8269, 8273, 8287, 8291, 8293, 8297, 8311, 8317,
+ 8329, 8353, 8363, 8369, 8377, 8387, 8389, 8419, 8423,
+ 8429, 8431, 8443, 8447, 8461, 8467, 8501, 8513, 8521,
+ 8527, 8537, 8539, 8543, 8563, 8573, 8581, 8597, 8599,
+ 8609, 8623, 8627, 8629, 8641, 8647, 8663, 8669, 8677,
+ 8681, 8689, 8693, 8699, 8707, 8713, 8719, 8731, 8737,
+ 8741, 8747, 8753, 8761, 8779, 8783, 8803, 8807, 8819,
+ 8821, 8831, 8837, 8839, 8849, 8861, 8863, 8867, 8887,
+ 8893, 8923, 8929, 8933, 8941, 8951, 8963, 8969, 8971,
+ 8999, 9001, 9007, 9011, 9013, 9029, 9041, 9043, 9049,
+ 9059, 9067, 9091, 9103, 9109, 9127, 9133, 9137, 9151,
+ 9157, 9161, 9173, 9181, 9187, 9199, 9203, 9209, 9221,
+ 9227, 9239, 9241, 9257, 9277, 9281, 9283, 9293, 9311,
+ 9319, 9323, 9337, 9341, 9343, 9349, 9371, 9377, 9391,
+ 9397, 9403, 9413, 9419, 9421, 9431, 9433, 9437, 9439,
+ 9461, 9463, 9467, 9473, 9479, 9491, 9497, 9511, 9521,
+ 9533, 9539, 9547, 9551, 9587, 9601, 9613, 9619, 9623,
+ 9629, 9631, 9643, 9649, 9661, 9677, 9679, 9689, 9697,
+ 9719, 9721, 9733, 9739, 9743, 9749, 9767, 9769, 9781,
+ 9787, 9791, 9803, 9811, 9817, 9829, 9833, 9839, 9851,
+ 9857, 9859, 9871, 9883, 9887, 9901, 9907, 9923, 9929,
+ 9931, 9941, 9949, 9967, 9973, 10007, 10009, 10037, 10039,
+ 10061, 10067, 10069, 10079, 10091, 10093, 10099, 10103, 10111,
+ 10133, 10139, 10141, 10151, 10159, 10163, 10169, 10177, 10181,
+ 10193, 10211, 10223, 10243, 10247, 10253, 10259, 10267, 10271,
+ 10273, 10289, 10301, 10303, 10313, 10321, 10331, 10333, 10337,
+ 10343, 10357, 10369, 10391, 10399, 10427, 10429, 10433, 10453,
+ 10457, 10459, 10463, 10477, 10487, 10499, 10501, 10513, 10529,
+ 10531, 10559, 10567, 10589, 10597, 10601, 10607, 10613, 10627,
+ 10631, 10639, 10651, 10657, 10663, 10667, 10687, 10691, 10709,
+ 10711, 10723, 10729, 10733, 10739, 10753, 10771, 10781, 10789,
+ 10799, 10831, 10837, 10847, 10853, 10859, 10861, 10867, 10883,
+ 10889, 10891, 10903, 10909, 10937, 10939, 10949, 10957, 10973,
+ 10979, 10987, 10993, 11003, 11027, 11047, 11057, 11059, 11069,
+ 11071, 11083, 11087, 11093, 11113, 11117, 11119, 11131, 11149,
+ 11159, 11161, 11171, 11173, 11177, 11197, 11213, 11239, 11243,
+ 11251, 11257, 11261, 11273, 11279, 11287, 11299, 11311, 11317,
+ 11321, 11329, 11351, 11353, 11369, 11383, 11393, 11399, 11411,
+ 11423, 11437, 11443, 11447, 11467, 11471, 11483, 11489, 11491,
+ 11497, 11503, 11519, 11527, 11549, 11551, 11579, 11587, 11593,
+ 11597, 11617, 11621, 11633, 11657, 11677, 11681, 11689, 11699,
+ 11701, 11717, 11719, 11731, 11743, 11777, 11779, 11783, 11789,
+ 11801, 11807, 11813, 11821, 11827, 11831, 11833, 11839, 11863,
+ 11867, 11887, 11897, 11903, 11909, 11923, 11927, 11933, 11939,
+ 11941, 11953, 11959, 11969, 11971, 11981, 11987, 12007, 12011,
+ 12037, 12041, 12043, 12049, 12071, 12073, 12097, 12101, 12107,
+ 12109, 12113, 12119, 12143, 12149, 12157, 12161, 12163, 12197,
+ 12203, 12211, 12227, 12239, 12241, 12251, 12253, 12263, 12269,
+ 12277, 12281, 12289, 12301, 12323, 12329, 12343, 12347, 12373,
+ 12377, 12379, 12391, 12401, 12409, 12413, 12421, 12433, 12437,
+ 12451, 12457, 12473, 12479, 12487, 12491, 12497, 12503, 12511,
+ 12517, 12527, 12539, 12541, 12547, 12553, 12569, 12577, 12583,
+ 12589, 12601, 12611, 12613, 12619, 12637, 12641, 12647, 12653,
+ 12659, 12671, 12689, 12697, 12703, 12713, 12721, 12739, 12743,
+ 12757, 12763, 12781, 12791, 12799, 12809, 12821, 12823, 12829,
+ 12841, 12853, 12889, 12893, 12899, 12907, 12911, 12917, 12919,
+ 12923, 12941, 12953, 12959, 12967, 12973, 12979, 12983, 13001,
+ 13003, 13007, 13009, 13033, 13037, 13043, 13049, 13063, 13093,
+ 13099, 13103, 13109, 13121, 13127, 13147, 13151, 13159, 13163,
+ 13171, 13177, 13183, 13187, 13217, 13219, 13229, 13241, 13249,
+ 13259, 13267, 13291, 13297, 13309, 13313, 13327, 13331, 13337,
+ 13339, 13367, 13381, 13397, 13399, 13411, 13417, 13421, 13441,
+ 13451, 13457, 13463, 13469, 13477, 13487, 13499, 13513, 13523,
+ 13537, 13553, 13567, 13577, 13591, 13597, 13613, 13619, 13627,
+ 13633, 13649, 13669, 13679, 13681, 13687, 13691, 13693, 13697,
+ 13709, 13711, 13721, 13723, 13729, 13751, 13757, 13759, 13763,
+ 13781, 13789, 13799, 13807, 13829, 13831, 13841, 13859, 13873,
+ 13877, 13879, 13883, 13901, 13903, 13907, 13913, 13921, 13931,
+ 13933, 13963, 13967, 13997, 13999, 14009, 14011, 14029, 14033,
+ 14051, 14057, 14071, 14081, 14083, 14087, 14107, 14143, 14149,
+ 14153, 14159, 14173, 14177, 14197, 14207, 14221, 14243, 14249,
+ 14251, 14281, 14293, 14303, 14321, 14323, 14327, 14341, 14347,
+ 14369, 14387, 14389, 14401, 14407, 14411, 14419, 14423, 14431,
+ 14437, 14447, 14449, 14461, 14479, 14489, 14503, 14519, 14533,
+ 14537, 14543, 14549, 14551, 14557, 14561, 14563, 14591, 14593,
+ 14621, 14627, 14629, 14633, 14639, 14653, 14657, 14669, 14683,
+ 14699, 14713, 14717, 14723, 14731, 14737, 14741, 14747, 14753,
+ 14759, 14767, 14771, 14779, 14783, 14797, 14813, 14821, 14827,
+ 14831, 14843, 14851, 14867, 14869, 14879, 14887, 14891, 14897,
+ 14923, 14929, 14939, 14947, 14951, 14957, 14969, 14983, 15013,
+ 15017, 15031, 15053, 15061, 15073, 15077, 15083, 15091, 15101,
+ 15107, 15121, 15131, 15137, 15139, 15149, 15161, 15173, 15187,
+ 15193, 15199, 15217, 15227, 15233, 15241, 15259, 15263, 15269,
+ 15271, 15277, 15287, 15289, 15299, 15307, 15313, 15319, 15329,
+ 15331, 15349, 15359, 15361, 15373, 15377, 15383, 15391, 15401,
+ 15413, 15427, 15439, 15443, 15451, 15461, 15467, 15473, 15493,
+ 15497, 15511, 15527, 15541, 15551, 15559, 15569, 15581, 15583,
+ 15601, 15607, 15619, 15629, 15641, 15643, 15647, 15649, 15661,
+ 15667, 15671, 15679, 15683, 15727, 15731, 15733, 15737, 15739,
+ 15749, 15761, 15767, 15773, 15787, 15791, 15797, 15803, 15809,
+ 15817, 15823, 15859, 15877, 15881, 15887, 15889, 15901, 15907,
+ 15913, 15919, 15923, 15937, 15959, 15971, 15973, 15991, 16001,
+ 16007, 16033, 16057, 16061, 16063, 16067, 16069, 16073, 16087,
+ 16091, 16097, 16103, 16111, 16127, 16139, 16141, 16183, 16187,
+ 16189, 16193, 16217, 16223, 16229, 16231, 16249, 16253, 16267,
+ 16273, 16301, 16319, 16333, 16339, 16349, 16361, 16363, 16369,
+ 16381, 16411, 16417, 16421, 16427, 16433, 16447, 16451, 16453,
+ 16477, 16481, 16487, 16493, 16519, 16529, 16547, 16553, 16561,
+ 16567, 16573, 16603, 16607, 16619, 16631, 16633, 16649, 16651,
+ 16657, 16661, 16673, 16691, 16693, 16699, 16703, 16729, 16741,
+ 16747, 16759, 16763, 16787, 16811, 16823, 16829, 16831, 16843,
+ 16871, 16879, 16883, 16889, 16901, 16903, 16921, 16927, 16931,
+ 16937, 16943, 16963, 16979, 16981, 16987, 16993, 17011, 17021,
+ 17027, 17029, 17033, 17041, 17047, 17053, 17077, 17093, 17099,
+ 17107, 17117, 17123, 17137, 17159, 17167, 17183, 17189, 17191,
+ 17203, 17207, 17209, 17231, 17239, 17257, 17291, 17293, 17299,
+ 17317, 17321, 17327, 17333, 17341, 17351, 17359, 17377, 17383,
+ 17387, 17389, 17393, 17401, 17417, 17419, 17431, 17443, 17449,
+ 17467, 17471, 17477, 17483, 17489, 17491, 17497, 17509, 17519,
+ 17539, 17551, 17569, 17573, 17579, 17581, 17597, 17599, 17609,
+ 17623, 17627, 17657, 17659, 17669, 17681, 17683, 17707, 17713,
+ 17729, 17737, 17747, 17749, 17761, 17783, 17789, 17791, 17807,
+ 17827, 17837, 17839, 17851, 17863, 17881, 17891, 17903, 17909,
+ 17911, 17921, 17923, 17929, 17939, 17957, 17959, 17971, 17977,
+ 17981, 17987, 17989, 18013, 18041, 18043, 18047, 18049, 18059,
+ 18061, 18077, 18089, 18097, 18119, 18121, 18127, 18131, 18133,
+ 18143, 18149, 18169, 18181, 18191, 18199, 18211, 18217, 18223,
+ 18229, 18233, 18251, 18253, 18257, 18269, 18287, 18289, 18301,
+ 18307, 18311, 18313, 18329, 18341, 18353, 18367, 18371, 18379,
+ 18397, 18401, 18413, 18427, 18433, 18439, 18443, 18451, 18457,
+ 18461, 18481, 18493, 18503, 18517, 18521, 18523, 18539, 18541,
+ 18553, 18583, 18587, 18593, 18617, 18637, 18661, 18671, 18679,
+ 18691, 18701, 18713, 18719, 18731, 18743, 18749, 18757, 18773,
+ 18787, 18793, 18797, 18803, 18839, 18859, 18869, 18899, 18911,
+ 18913, 18917, 18919, 18947, 18959, 18973, 18979, 19001, 19009,
+ 19013, 19031, 19037, 19051, 19069, 19073, 19079, 19081, 19087,
+ 19121, 19139, 19141, 19157, 19163, 19181, 19183, 19207, 19211,
+ 19213, 19219, 19231, 19237, 19249, 19259, 19267, 19273, 19289,
+ 19301, 19309, 19319, 19333, 19373, 19379, 19381, 19387, 19391,
+ 19403, 19417, 19421, 19423, 19427, 19429, 19433, 19441, 19447,
+ 19457, 19463, 19469, 19471, 19477, 19483, 19489, 19501, 19507,
+ 19531, 19541, 19543, 19553, 19559, 19571, 19577, 19583, 19597,
+ 19603, 19609, 19661, 19681, 19687, 19697, 19699, 19709, 19717,
+ 19727, 19739, 19751, 19753, 19759, 19763, 19777, 19793, 19801,
+ 19813, 19819, 19841, 19843, 19853, 19861, 19867, 19889, 19891,
+ 19913, 19919, 19927, 19937, 19949, 19961, 19963, 19973, 19979,
+ 19991, 19993, 19997, 20011, 20021, 20023, 20029, 20047, 20051,
+ 20063, 20071, 20089, 20101, 20107, 20113, 20117, 20123, 20129,
+ 20143, 20147, 20149, 20161, 20173, 20177, 20183, 20201, 20219,
+ 20231, 20233, 20249, 20261, 20269, 20287, 20297, 20323, 20327,
+ 20333, 20341, 20347, 20353, 20357, 20359, 20369, 20389, 20393,
+ 20399, 20407, 20411, 20431, 20441, 20443, 20477, 20479, 20483,
+ 20507, 20509, 20521, 20533, 20543, 20549, 20551, 20563, 20593,
+ 20599, 20611, 20627, 20639, 20641, 20663, 20681, 20693, 20707,
+ 20717, 20719, 20731, 20743, 20747, 20749, 20753, 20759, 20771,
+ 20773, 20789, 20807, 20809, 20849, 20857, 20873, 20879, 20887,
+ 20897, 20899, 20903, 20921, 20929, 20939, 20947, 20959, 20963,
+ 20981, 20983, 21001, 21011, 21013, 21017, 21019, 21023, 21031,
+ 21059, 21061, 21067, 21089, 21101, 21107, 21121, 21139, 21143,
+ 21149, 21157, 21163, 21169, 21179, 21187, 21191, 21193, 21211,
+ 21221, 21227, 21247, 21269, 21277, 21283, 21313, 21317, 21319,
+ 21323, 21341, 21347, 21377, 21379, 21383, 21391, 21397, 21401,
+ 21407, 21419, 21433, 21467, 21481, 21487, 21491, 21493, 21499,
+ 21503, 21517, 21521, 21523, 21529, 21557, 21559, 21563, 21569,
+ 21577, 21587, 21589, 21599, 21601, 21611, 21613, 21617, 21647,
+ 21649, 21661, 21673, 21683, 21701, 21713, 21727, 21737, 21739,
+ 21751, 21757, 21767, 21773, 21787, 21799, 21803, 21817, 21821,
+ 21839, 21841, 21851, 21859, 21863, 21871, 21881, 21893, 21911,
+ 21929, 21937, 21943, 21961, 21977, 21991, 21997, 22003, 22013,
+ 22027, 22031, 22037, 22039, 22051, 22063, 22067, 22073, 22079,
+ 22091, 22093, 22109, 22111, 22123, 22129, 22133, 22147, 22153,
+ 22157, 22159, 22171, 22189, 22193, 22229, 22247, 22259, 22271,
+ 22273, 22277, 22279, 22283, 22291, 22303, 22307, 22343, 22349,
+ 22367, 22369, 22381, 22391, 22397, 22409, 22433, 22441, 22447,
+ 22453, 22469, 22481, 22483, 22501, 22511, 22531, 22541, 22543,
+ 22549, 22567, 22571, 22573, 22613, 22619, 22621, 22637, 22639,
+ 22643, 22651, 22669, 22679, 22691, 22697, 22699, 22709, 22717,
+ 22721, 22727, 22739, 22741, 22751, 22769, 22777, 22783, 22787,
+ 22807, 22811, 22817, 22853, 22859, 22861, 22871, 22877, 22901,
+ 22907, 22921, 22937, 22943, 22961, 22963, 22973, 22993, 23003,
+ 23011, 23017, 23021, 23027, 23029, 23039, 23041, 23053, 23057,
+ 23059, 23063, 23071, 23081, 23087, 23099, 23117, 23131, 23143,
+ 23159, 23167, 23173, 23189, 23197, 23201, 23203, 23209, 23227,
+ 23251, 23269, 23279, 23291, 23293, 23297, 23311, 23321, 23327,
+ 23333, 23339, 23357, 23369, 23371, 23399, 23417, 23431, 23447,
+ 23459, 23473, 23497, 23509, 23531, 23537, 23539, 23549, 23557,
+ 23561, 23563, 23567, 23581, 23593, 23599, 23603, 23609, 23623,
+ 23627, 23629, 23633, 23663, 23669, 23671, 23677, 23687, 23689,
+ 23719, 23741, 23743, 23747, 23753, 23761, 23767, 23773, 23789,
+ 23801, 23813, 23819, 23827, 23831, 23833, 23857, 23869, 23873,
+ 23879, 23887, 23893, 23899, 23909, 23911, 23917, 23929, 23957,
+ 23971, 23977, 23981, 23993, 24001, 24007, 24019, 24023, 24029,
+ 24043, 24049, 24061, 24071, 24077, 24083, 24091, 24097, 24103,
+ 24107, 24109, 24113, 24121, 24133, 24137, 24151, 24169, 24179,
+ 24181, 24197, 24203, 24223, 24229, 24239, 24247, 24251, 24281,
+ 24317, 24329, 24337, 24359, 24371, 24373, 24379, 24391, 24407,
+ 24413, 24419, 24421, 24439, 24443, 24469, 24473, 24481, 24499,
+ 24509, 24517, 24527, 24533, 24547, 24551, 24571, 24593, 24611,
+ 24623, 24631, 24659, 24671, 24677, 24683, 24691, 24697, 24709,
+ 24733, 24749, 24763, 24767, 24781, 24793, 24799, 24809, 24821,
+ 24841, 24847, 24851, 24859, 24877, 24889, 24907, 24917, 24919,
+ 24923, 24943, 24953, 24967, 24971, 24977, 24979, 24989, 25013,
+ 25031, 25033, 25037, 25057, 25073, 25087, 25097, 25111, 25117,
+ 25121, 25127, 25147, 25153, 25163, 25169, 25171, 25183, 25189,
+ 25219, 25229, 25237, 25243, 25247, 25253, 25261, 25301, 25303,
+ 25307, 25309, 25321, 25339, 25343, 25349, 25357, 25367, 25373,
+ 25391, 25409, 25411, 25423, 25439, 25447, 25453, 25457, 25463,
+ 25469, 25471, 25523, 25537, 25541, 25561, 25577, 25579, 25583,
+ 25589, 25601, 25603, 25609, 25621, 25633, 25639, 25643, 25657,
+ 25667, 25673, 25679, 25693, 25703, 25717, 25733, 25741, 25747,
+ 25759, 25763, 25771, 25793, 25799, 25801, 25819, 25841, 25847,
+ 25849, 25867, 25873, 25889, 25903, 25913, 25919, 25931, 25933,
+ 25939, 25943, 25951, 25969, 25981, 25997, 25999, 26003, 26017,
+ 26021, 26029, 26041, 26053, 26083, 26099, 26107, 26111, 26113,
+ 26119, 26141, 26153, 26161, 26171, 26177, 26183, 26189, 26203,
+ 26209, 26227, 26237, 26249, 26251, 26261, 26263, 26267, 26293,
+ 26297, 26309, 26317, 26321, 26339, 26347, 26357, 26371, 26387,
+ 26393, 26399, 26407, 26417, 26423, 26431, 26437, 26449, 26459,
+ 26479, 26489, 26497, 26501, 26513, 26539, 26557, 26561, 26573,
+ 26591, 26597, 26627, 26633, 26641, 26647, 26669, 26681, 26683,
+ 26687, 26693, 26699, 26701, 26711, 26713, 26717, 26723, 26729,
+ 26731, 26737, 26759, 26777, 26783, 26801, 26813, 26821, 26833,
+ 26839, 26849, 26861, 26863, 26879, 26881, 26891, 26893, 26903,
+ 26921, 26927, 26947, 26951, 26953, 26959, 26981, 26987, 26993,
+ 27011, 27017, 27031, 27043, 27059, 27061, 27067, 27073, 27077,
+ 27091, 27103, 27107, 27109, 27127, 27143, 27179, 27191, 27197,
+ 27211, 27239, 27241, 27253, 27259, 27271, 27277, 27281, 27283,
+ 27299, 27329, 27337, 27361, 27367, 27397, 27407, 27409, 27427,
+ 27431, 27437, 27449, 27457, 27479, 27481, 27487, 27509, 27527,
+ 27529, 27539, 27541, 27551, 27581, 27583, 27611, 27617, 27631,
+ 27647, 27653, 27673, 27689, 27691, 27697, 27701, 27733, 27737,
+ 27739, 27743, 27749, 27751, 27763, 27767, 27773, 27779, 27791,
+ 27793, 27799, 27803, 27809, 27817, 27823, 27827, 27847, 27851,
+ 27883, 27893, 27901, 27917, 27919, 27941, 27943, 27947, 27953,
+ 27961, 27967, 27983, 27997, 28001, 28019, 28027, 28031, 28051,
+ 28057, 28069, 28081, 28087, 28097, 28099, 28109, 28111, 28123,
+ 28151, 28163, 28181, 28183, 28201, 28211, 28219, 28229, 28277,
+ 28279, 28283, 28289, 28297, 28307, 28309, 28319, 28349, 28351,
+ 28387, 28393, 28403, 28409, 28411, 28429, 28433, 28439, 28447,
+ 28463, 28477, 28493, 28499, 28513, 28517, 28537, 28541, 28547,
+ 28549, 28559, 28571, 28573, 28579, 28591, 28597, 28603, 28607,
+ 28619, 28621, 28627, 28631, 28643, 28649, 28657, 28661, 28663,
+ 28669, 28687, 28697, 28703, 28711, 28723, 28729, 28751, 28753,
+ 28759, 28771, 28789, 28793, 28807, 28813, 28817, 28837, 28843,
+ 28859, 28867, 28871, 28879, 28901, 28909, 28921, 28927, 28933,
+ 28949, 28961, 28979, 29009, 29017, 29021, 29023, 29027, 29033,
+ 29059, 29063, 29077, 29101, 29123, 29129, 29131, 29137, 29147,
+ 29153, 29167, 29173, 29179, 29191, 29201, 29207, 29209, 29221,
+ 29231, 29243, 29251, 29269, 29287, 29297, 29303, 29311, 29327,
+ 29333, 29339, 29347, 29363, 29383, 29387, 29389, 29399, 29401,
+ 29411, 29423, 29429, 29437, 29443, 29453, 29473, 29483, 29501,
+ 29527, 29531, 29537, 29567, 29569, 29573, 29581, 29587, 29599,
+ 29611, 29629, 29633, 29641, 29663, 29669, 29671, 29683, 29717,
+ 29723, 29741, 29753, 29759, 29761, 29789, 29803, 29819, 29833,
+ 29837, 29851, 29863, 29867, 29873, 29879, 29881, 29917, 29921,
+ 29927, 29947, 29959, 29983, 29989, 30011, 30013, 30029, 30047,
+ 30059, 30071, 30089, 30091, 30097, 30103, 30109, 30113, 30119,
+ 30133, 30137, 30139, 30161, 30169, 30181, 30187, 30197, 30203,
+ 30211, 30223, 30241, 30253, 30259, 30269, 30271, 30293, 30307,
+ 30313, 30319, 30323, 30341, 30347, 30367, 30389, 30391, 30403,
+ 30427, 30431, 30449, 30467, 30469, 30491, 30493, 30497, 30509,
+ 30517, 30529, 30539, 30553, 30557, 30559, 30577, 30593, 30631,
+ 30637, 30643, 30649, 30661, 30671, 30677, 30689, 30697, 30703,
+ 30707, 30713, 30727, 30757, 30763, 30773, 30781, 30803, 30809,
+ 30817, 30829, 30839, 30841, 30851, 30853, 30859, 30869, 30871,
+ 30881, 30893, 30911, 30931, 30937, 30941, 30949, 30971, 30977,
+ 30983, 31013, 31019, 31033, 31039, 31051, 31063, 31069, 31079,
+ 31081, 31091, 31121, 31123, 31139, 31147, 31151, 31153, 31159,
+ 31177, 31181, 31183, 31189, 31193, 31219, 31223, 31231, 31237,
+ 31247, 31249, 31253, 31259, 31267, 31271, 31277, 31307, 31319,
+ 31321, 31327, 31333, 31337, 31357, 31379, 31387, 31391, 31393,
+ 31397, 31469, 31477, 31481, 31489, 31511, 31513, 31517, 31531,
+ 31541, 31543, 31547, 31567, 31573, 31583, 31601, 31607, 31627,
+ 31643, 31649, 31657, 31663, 31667, 31687, 31699, 31721, 31723,
+ 31727, 31729, 31741, 31751, 31769, 31771, 31793, 31799, 31817,
+ 31847, 31849, 31859, 31873, 31883, 31891, 31907, 31957, 31963,
+ 31973, 31981, 31991, 32003, 32009, 32027, 32029, 32051, 32057,
+ 32059, 32063, 32069, 32077, 32083, 32089, 32099, 32117, 32119,
+ 32141, 32143, 32159, 32173, 32183, 32189, 32191, 32203, 32213,
+ 32233, 32237, 32251, 32257, 32261, 32297, 32299, 32303, 32309,
+ 32321, 32323, 32327, 32341, 32353, 32359, 32363, 32369, 32371,
+ 32377, 32381, 32401, 32411, 32413, 32423, 32429, 32441, 32443,
+ 32467, 32479, 32491, 32497, 32503, 32507, 32531, 32533, 32537,
+ 32561, 32563, 32569, 32573, 32579, 32587, 32603, 32609, 32611,
+ 32621, 32633, 32647, 32653, 32687, 32693, 32707, 32713, 32717,
+ 32719, 32749, 32771, 32779, 32783, 32789, 32797, 32801, 32803,
+ 32831, 32833, 32839, 32843, 32869, 32887, 32909, 32911, 32917,
+ 32933, 32939, 32941, 32957, 32969, 32971, 32983, 32987, 32993,
+ 32999, 33013, 33023, 33029, 33037, 33049, 33053, 33071, 33073,
+ 33083, 33091, 33107, 33113, 33119, 33149, 33151, 33161, 33179,
+ 33181, 33191, 33199, 33203, 33211, 33223, 33247, 33287, 33289,
+ 33301, 33311, 33317, 33329, 33331, 33343, 33347, 33349, 33353,
+ 33359, 33377, 33391, 33403, 33409, 33413, 33427, 33457, 33461,
+ 33469, 33479, 33487, 33493, 33503, 33521, 33529, 33533, 33547,
+ 33563, 33569, 33577, 33581, 33587, 33589, 33599, 33601, 33613,
+ 33617, 33619, 33623, 33629, 33637, 33641, 33647, 33679, 33703,
+ 33713, 33721, 33739, 33749, 33751, 33757, 33767, 33769, 33773,
+ 33791, 33797, 33809, 33811, 33827, 33829, 33851, 33857, 33863,
+ 33871, 33889, 33893, 33911, 33923, 33931, 33937, 33941, 33961,
+ 33967, 33997, 34019, 34031, 34033, 34039, 34057, 34061, 34123,
+ 34127, 34129, 34141, 34147, 34157, 34159, 34171, 34183, 34211,
+ 34213, 34217, 34231, 34253, 34259, 34261, 34267, 34273, 34283,
+ 34297, 34301, 34303, 34313, 34319, 34327, 34337, 34351, 34361,
+ 34367, 34369, 34381, 34403, 34421, 34429, 34439, 34457, 34469,
+ 34471, 34483, 34487, 34499, 34501, 34511, 34513, 34519, 34537,
+ 34543, 34549, 34583, 34589, 34591, 34603, 34607, 34613, 34631,
+ 34649, 34651, 34667, 34673, 34679, 34687, 34693, 34703, 34721,
+ 34729, 34739, 34747, 34757, 34759, 34763, 34781, 34807, 34819,
+ 34841, 34843, 34847, 34849, 34871, 34877, 34883, 34897, 34913,
+ 34919, 34939, 34949, 34961, 34963, 34981, 35023, 35027, 35051,
+ 35053, 35059, 35069, 35081, 35083, 35089, 35099, 35107, 35111,
+ 35117, 35129, 35141, 35149, 35153, 35159, 35171, 35201, 35221,
+ 35227, 35251, 35257, 35267, 35279, 35281, 35291, 35311, 35317,
+ 35323, 35327, 35339, 35353, 35363, 35381, 35393, 35401, 35407,
+ 35419, 35423, 35437, 35447, 35449, 35461, 35491, 35507, 35509,
+ 35521, 35527, 35531, 35533, 35537, 35543, 35569, 35573, 35591,
+ 35593, 35597, 35603, 35617, 35671, 35677, 35729, 35731, 35747,
+ 35753, 35759, 35771, 35797, 35801, 35803, 35809, 35831, 35837,
+ 35839, 35851, 35863, 35869, 35879, 35897, 35899, 35911, 35923,
+ 35933, 35951, 35963, 35969, 35977, 35983, 35993, 35999, 36007,
+ 36011, 36013, 36017, 36037, 36061, 36067, 36073, 36083, 36097,
+ 36107, 36109, 36131, 36137, 36151, 36161, 36187, 36191, 36209,
+ 36217, 36229, 36241, 36251, 36263, 36269, 36277, 36293, 36299,
+ 36307, 36313, 36319, 36341, 36343, 36353, 36373, 36383, 36389,
+ 36433, 36451, 36457, 36467, 36469, 36473, 36479, 36493, 36497,
+ 36523, 36527, 36529, 36541, 36551, 36559, 36563, 36571, 36583,
+ 36587, 36599, 36607, 36629, 36637, 36643, 36653, 36671, 36677,
+ 36683, 36691, 36697, 36709, 36713, 36721, 36739, 36749, 36761,
+ 36767, 36779, 36781, 36787, 36791, 36793, 36809, 36821, 36833,
+ 36847, 36857, 36871, 36877, 36887, 36899, 36901, 36913, 36919,
+ 36923, 36929, 36931, 36943, 36947, 36973, 36979, 36997, 37003,
+ 37013, 37019, 37021, 37039, 37049, 37057, 37061, 37087, 37097,
+ 37117, 37123, 37139, 37159, 37171, 37181, 37189, 37199, 37201,
+ 37217, 37223, 37243, 37253, 37273, 37277, 37307, 37309, 37313,
+ 37321, 37337, 37339, 37357, 37361, 37363, 37369, 37379, 37397,
+ 37409, 37423, 37441, 37447, 37463, 37483, 37489, 37493, 37501,
+ 37507, 37511, 37517, 37529, 37537, 37547, 37549, 37561, 37567,
+ 37571, 37573, 37579, 37589, 37591, 37607, 37619, 37633, 37643,
+ 37649, 37657, 37663, 37691, 37693, 37699, 37717, 37747, 37781,
+ 37783, 37799, 37811, 37813, 37831, 37847, 37853, 37861, 37871,
+ 37879, 37889, 37897, 37907, 37951, 37957, 37963, 37967, 37987,
+ 37991, 37993, 37997, 38011, 38039, 38047, 38053, 38069, 38083,
+ 38113, 38119, 38149, 38153, 38167, 38177, 38183, 38189, 38197,
+ 38201, 38219, 38231, 38237, 38239, 38261, 38273, 38281, 38287,
+ 38299, 38303, 38317, 38321, 38327, 38329, 38333, 38351, 38371,
+ 38377, 38393, 38431, 38447, 38449, 38453, 38459, 38461, 38501,
+ 38543, 38557, 38561, 38567, 38569, 38593, 38603, 38609, 38611,
+ 38629, 38639, 38651, 38653, 38669, 38671, 38677, 38693, 38699,
+ 38707, 38711, 38713, 38723, 38729, 38737, 38747, 38749, 38767,
+ 38783, 38791, 38803, 38821, 38833, 38839, 38851, 38861, 38867,
+ 38873, 38891, 38903, 38917, 38921, 38923, 38933, 38953, 38959,
+ 38971, 38977, 38993, 39019, 39023, 39041, 39043, 39047, 39079,
+ 39089, 39097, 39103, 39107, 39113, 39119, 39133, 39139, 39157,
+ 39161, 39163, 39181, 39191, 39199, 39209, 39217, 39227, 39229,
+ 39233, 39239, 39241, 39251, 39293, 39301, 39313, 39317, 39323,
+ 39341, 39343, 39359, 39367, 39371, 39373, 39383, 39397, 39409,
+ 39419, 39439, 39443, 39451, 39461, 39499, 39503, 39509, 39511,
+ 39521, 39541, 39551, 39563, 39569, 39581, 39607, 39619, 39623,
+ 39631, 39659, 39667, 39671, 39679, 39703, 39709, 39719, 39727,
+ 39733, 39749, 39761, 39769, 39779, 39791, 39799, 39821, 39827,
+ 39829, 39839, 39841, 39847, 39857, 39863, 39869, 39877, 39883,
+ 39887, 39901, 39929, 39937, 39953, 39971, 39979, 39983, 39989,
+ 40009, 40013, 40031, 40037, 40039, 40063, 40087, 40093, 40099,
+ 40111, 40123, 40127, 40129, 40151, 40153, 40163, 40169, 40177,
+ 40189, 40193, 40213, 40231, 40237, 40241, 40253, 40277, 40283,
+ 40289, 40343, 40351, 40357, 40361, 40387, 40423, 40427, 40429,
+ 40433, 40459, 40471, 40483, 40487, 40493, 40499, 40507, 40519,
+ 40529, 40531, 40543, 40559, 40577, 40583, 40591, 40597, 40609,
+ 40627, 40637, 40639, 40693, 40697, 40699, 40709, 40739, 40751,
+ 40759, 40763, 40771, 40787, 40801, 40813, 40819, 40823, 40829,
+ 40841, 40847, 40849, 40853, 40867, 40879, 40883, 40897, 40903,
+ 40927, 40933, 40939, 40949, 40961, 40973, 40993, 41011, 41017,
+ 41023, 41039, 41047, 41051, 41057, 41077, 41081, 41113, 41117,
+ 41131, 41141, 41143, 41149, 41161, 41177, 41179, 41183, 41189,
+ 41201, 41203, 41213, 41221, 41227, 41231, 41233, 41243, 41257,
+ 41263, 41269, 41281, 41299, 41333, 41341, 41351, 41357, 41381,
+ 41387, 41389, 41399, 41411, 41413, 41443, 41453, 41467, 41479,
+ 41491, 41507, 41513, 41519, 41521, 41539, 41543, 41549, 41579,
+ 41593, 41597, 41603, 41609, 41611, 41617, 41621, 41627, 41641,
+ 41647, 41651, 41659, 41669, 41681, 41687, 41719, 41729, 41737,
+ 41759, 41761, 41771, 41777, 41801, 41809, 41813, 41843, 41849,
+ 41851, 41863, 41879, 41887, 41893, 41897, 41903, 41911, 41927,
+ 41941, 41947, 41953, 41957, 41959, 41969, 41981, 41983, 41999,
+ 42013, 42017, 42019, 42023, 42043, 42061, 42071, 42073, 42083,
+ 42089, 42101, 42131, 42139, 42157, 42169, 42179, 42181, 42187,
+ 42193, 42197, 42209, 42221, 42223, 42227, 42239, 42257, 42281,
+ 42283, 42293, 42299, 42307, 42323, 42331, 42337, 42349, 42359,
+ 42373, 42379, 42391, 42397, 42403, 42407, 42409, 42433, 42437,
+ 42443, 42451, 42457, 42461, 42463, 42467, 42473, 42487, 42491,
+ 42499, 42509, 42533, 42557, 42569, 42571, 42577, 42589, 42611,
+ 42641, 42643, 42649, 42667, 42677, 42683, 42689, 42697, 42701,
+ 42703, 42709, 42719, 42727, 42737, 42743, 42751, 42767, 42773,
+ 42787, 42793, 42797, 42821, 42829, 42839, 42841, 42853, 42859,
+ 42863, 42899, 42901, 42923, 42929, 42937, 42943, 42953, 42961,
+ 42967, 42979, 42989, 43003, 43013, 43019, 43037, 43049, 43051,
+ 43063, 43067, 43093, 43103, 43117, 43133, 43151, 43159, 43177,
+ 43189, 43201, 43207, 43223, 43237, 43261, 43271, 43283, 43291,
+ 43313, 43319, 43321, 43331, 43391, 43397, 43399, 43403, 43411,
+ 43427, 43441, 43451, 43457, 43481, 43487, 43499, 43517, 43541,
+ 43543, 43573, 43577, 43579, 43591, 43597, 43607, 43609, 43613,
+ 43627, 43633, 43649, 43651, 43661, 43669, 43691, 43711, 43717,
+ 43721, 43753, 43759, 43777, 43781, 43783, 43787, 43789, 43793,
+ 43801, 43853, 43867, 43889, 43891, 43913, 43933, 43943, 43951,
+ 43961, 43963, 43969, 43973, 43987, 43991, 43997, 44017, 44021,
+ 44027, 44029, 44041, 44053, 44059, 44071, 44087, 44089, 44101,
+ 44111, 44119, 44123, 44129, 44131, 44159, 44171, 44179, 44189,
+ 44201, 44203, 44207, 44221, 44249, 44257, 44263, 44267, 44269,
+ 44273, 44279, 44281, 44293, 44351, 44357, 44371, 44381, 44383,
+ 44389, 44417, 44449, 44453, 44483, 44491, 44497, 44501, 44507,
+ 44519, 44531, 44533, 44537, 44543, 44549, 44563, 44579, 44587,
+ 44617, 44621, 44623, 44633, 44641, 44647, 44651, 44657, 44683,
+ 44687, 44699, 44701, 44711, 44729, 44741, 44753, 44771, 44773,
+ 44777, 44789, 44797, 44809, 44819, 44839, 44843, 44851, 44867,
+ 44879, 44887, 44893, 44909, 44917, 44927, 44939, 44953, 44959,
+ 44963, 44971, 44983, 44987, 45007, 45013, 45053, 45061, 45077,
+ 45083, 45119, 45121, 45127, 45131, 45137, 45139, 45161, 45179,
+ 45181, 45191, 45197, 45233, 45247, 45259, 45263, 45281, 45289,
+ 45293, 45307, 45317, 45319, 45329, 45337, 45341, 45343, 45361,
+ 45377, 45389, 45403, 45413, 45427, 45433, 45439, 45481, 45491,
+ 45497, 45503, 45523, 45533, 45541, 45553, 45557, 45569, 45587,
+ 45589, 45599, 45613, 45631, 45641, 45659, 45667, 45673, 45677,
+ 45691, 45697, 45707, 45737, 45751, 45757, 45763, 45767, 45779,
+ 45817, 45821, 45823, 45827, 45833, 45841, 45853, 45863, 45869,
+ 45887, 45893, 45943, 45949, 45953, 45959, 45971, 45979, 45989,
+ 46021, 46027, 46049, 46051, 46061, 46073, 46091, 46093, 46099,
+ 46103, 46133, 46141, 46147, 46153, 46171, 46181, 46183, 46187,
+ 46199, 46219, 46229, 46237, 46261, 46271, 46273, 46279, 46301,
+ 46307, 46309, 46327, 46337, 46349, 46351, 46381, 46399, 46411,
+ 46439, 46441, 46447, 46451, 46457, 46471, 46477, 46489, 46499,
+ 46507, 46511, 46523, 46549, 46559, 46567, 46573, 46589, 46591,
+ 46601, 46619, 46633, 46639, 46643, 46649, 46663, 46679, 46681,
+ 46687, 46691, 46703, 46723, 46727, 46747, 46751, 46757, 46769,
+ 46771, 46807, 46811, 46817, 46819, 46829, 46831, 46853, 46861,
+ 46867, 46877, 46889, 46901, 46919, 46933, 46957, 46993, 46997,
+ 47017, 47041, 47051, 47057, 47059, 47087, 47093, 47111, 47119,
+ 47123, 47129, 47137, 47143, 47147, 47149, 47161, 47189, 47207,
+ 47221, 47237, 47251, 47269, 47279, 47287, 47293, 47297, 47303,
+ 47309, 47317, 47339, 47351, 47353, 47363, 47381, 47387, 47389,
+ 47407, 47417, 47419, 47431, 47441, 47459, 47491, 47497, 47501,
+ 47507, 47513, 47521, 47527, 47533, 47543, 47563, 47569, 47581,
+ 47591, 47599, 47609, 47623, 47629, 47639, 47653, 47657, 47659,
+ 47681, 47699, 47701, 47711, 47713, 47717, 47737, 47741, 47743,
+ 47777, 47779, 47791, 47797, 47807, 47809, 47819, 47837, 47843,
+ 47857, 47869, 47881, 47903, 47911, 47917, 47933, 47939, 47947,
+ 47951, 47963, 47969, 47977, 47981, 48017, 48023, 48029, 48049,
+ 48073, 48079, 48091, 48109, 48119, 48121, 48131, 48157, 48163,
+ 48179, 48187, 48193, 48197, 48221, 48239, 48247, 48259, 48271,
+ 48281, 48299, 48311, 48313, 48337, 48341, 48353, 48371, 48383,
+ 48397, 48407, 48409, 48413, 48437, 48449, 48463, 48473, 48479,
+ 48481, 48487, 48491, 48497, 48523, 48527, 48533, 48539, 48541,
+ 48563, 48571, 48589, 48593, 48611, 48619, 48623, 48647, 48649,
+ 48661, 48673, 48677, 48679, 48731, 48733, 48751, 48757, 48761,
+ 48767, 48779, 48781, 48787, 48799, 48809, 48817, 48821, 48823,
+ 48847, 48857, 48859, 48869, 48871, 48883, 48889, 48907, 48947,
+ 48953, 48973, 48989, 48991, 49003, 49009, 49019, 49031, 49033,
+ 49037, 49043, 49057, 49069, 49081, 49103, 49109, 49117, 49121,
+ 49123, 49139, 49157, 49169, 49171, 49177, 49193, 49199, 49201,
+ 49207, 49211, 49223, 49253, 49261, 49277, 49279, 49297, 49307,
+ 49331, 49333, 49339, 49363, 49367, 49369, 49391, 49393, 49409,
+ 49411, 49417, 49429, 49433, 49451, 49459, 49463, 49477, 49481,
+ 49499, 49523, 49529, 49531, 49537, 49547, 49549, 49559, 49597,
+ 49603, 49613, 49627, 49633, 49639, 49663, 49667, 49669, 49681,
+ 49697, 49711, 49727, 49739, 49741, 49747, 49757, 49783, 49787,
+ 49789, 49801, 49807, 49811, 49823, 49831, 49843, 49853, 49871,
+ 49877, 49891, 49919, 49921, 49927, 49937, 49939, 49943, 49957,
+ 49991, 49993, 49999, 50021, 50023, 50033, 50047, 50051, 50053,
+ 50069, 50077, 50087, 50093, 50101, 50111, 50119, 50123, 50129,
+ 50131, 50147, 50153, 50159, 50177, 50207, 50221, 50227, 50231,
+ 50261, 50263, 50273, 50287, 50291, 50311, 50321, 50329, 50333,
+ 50341, 50359, 50363, 50377, 50383, 50387, 50411, 50417, 50423,
+ 50441, 50459, 50461, 50497, 50503, 50513, 50527, 50539, 50543,
+ 50549, 50551, 50581, 50587, 50591, 50593, 50599, 50627, 50647,
+ 50651, 50671, 50683, 50707, 50723, 50741, 50753, 50767, 50773,
+ 50777, 50789, 50821, 50833, 50839, 50849, 50857, 50867, 50873,
+ 50891, 50893, 50909, 50923, 50929, 50951, 50957, 50969, 50971,
+ 50989, 50993, 51001, 51031, 51043, 51047, 51059, 51061, 51071,
+ 51109, 51131, 51133, 51137, 51151, 51157, 51169, 51193, 51197,
+ 51199, 51203, 51217, 51229, 51239, 51241, 51257, 51263, 51283,
+ 51287, 51307, 51329, 51341, 51343, 51347, 51349, 51361, 51383,
+ 51407, 51413, 51419, 51421, 51427, 51431, 51437, 51439, 51449,
+ 51461, 51473, 51479, 51481, 51487, 51503, 51511, 51517, 51521,
+ 51539, 51551, 51563, 51577, 51581, 51593, 51599, 51607, 51613,
+ 51631, 51637, 51647, 51659, 51673, 51679, 51683, 51691, 51713,
+ 51719, 51721, 51749, 51767, 51769, 51787, 51797, 51803, 51817,
+ 51827, 51829, 51839, 51853, 51859, 51869, 51871, 51893, 51899,
+ 51907, 51913, 51929, 51941, 51949, 51971, 51973, 51977, 51991,
+ 52009, 52021, 52027, 52051, 52057, 52067, 52069, 52081, 52103,
+ 52121, 52127, 52147, 52153, 52163, 52177, 52181, 52183, 52189,
+ 52201, 52223, 52237, 52249, 52253, 52259, 52267, 52289, 52291,
+ 52301, 52313, 52321, 52361, 52363, 52369, 52379, 52387, 52391,
+ 52433, 52453, 52457, 52489, 52501, 52511, 52517, 52529, 52541,
+ 52543, 52553, 52561, 52567, 52571, 52579, 52583, 52609, 52627,
+ 52631, 52639, 52667, 52673, 52691, 52697, 52709, 52711, 52721,
+ 52727, 52733, 52747, 52757, 52769, 52783, 52807, 52813, 52817,
+ 52837, 52859, 52861, 52879, 52883, 52889, 52901, 52903, 52919,
+ 52937, 52951, 52957, 52963, 52967, 52973, 52981, 52999, 53003,
+ 53017, 53047, 53051, 53069, 53077, 53087, 53089, 53093, 53101,
+ 53113, 53117, 53129, 53147, 53149, 53161, 53171, 53173, 53189,
+ 53197, 53201, 53231, 53233, 53239, 53267, 53269, 53279, 53281,
+ 53299, 53309, 53323, 53327, 53353, 53359, 53377, 53381, 53401,
+ 53407, 53411, 53419, 53437, 53441, 53453, 53479, 53503, 53507,
+ 53527, 53549, 53551, 53569, 53591, 53593, 53597, 53609, 53611,
+ 53617, 53623, 53629, 53633, 53639, 53653, 53657, 53681, 53693,
+ 53699, 53717, 53719, 53731, 53759, 53773, 53777, 53783, 53791,
+ 53813, 53819, 53831, 53849, 53857, 53861, 53881, 53887, 53891,
+ 53897, 53899, 53917, 53923, 53927, 53939, 53951, 53959, 53987,
+ 53993, 54001, 54011, 54013, 54037, 54049, 54059, 54083, 54091,
+ 54101, 54121, 54133, 54139, 54151, 54163, 54167, 54181, 54193,
+ 54217, 54251, 54269, 54277, 54287, 54293, 54311, 54319, 54323,
+ 54331, 54347, 54361, 54367, 54371, 54377, 54401, 54403, 54409,
+ 54413, 54419, 54421, 54437, 54443, 54449, 54469, 54493, 54497,
+ 54499, 54503, 54517, 54521, 54539, 54541, 54547, 54559, 54563,
+ 54577, 54581, 54583, 54601, 54617, 54623, 54629, 54631, 54647,
+ 54667, 54673, 54679, 54709, 54713, 54721, 54727, 54751, 54767,
+ 54773, 54779, 54787, 54799, 54829, 54833, 54851, 54869, 54877,
+ 54881, 54907, 54917, 54919, 54941, 54949, 54959, 54973, 54979,
+ 54983, 55001, 55009, 55021, 55049, 55051, 55057, 55061, 55073,
+ 55079, 55103, 55109, 55117, 55127, 55147, 55163, 55171, 55201,
+ 55207, 55213, 55217, 55219, 55229, 55243, 55249, 55259, 55291,
+ 55313, 55331, 55333, 55337, 55339, 55343, 55351, 55373, 55381,
+ 55399, 55411, 55439, 55441, 55457, 55469, 55487, 55501, 55511,
+ 55529, 55541, 55547, 55579, 55589, 55603, 55609, 55619, 55621,
+ 55631, 55633, 55639, 55661, 55663, 55667, 55673, 55681, 55691,
+ 55697, 55711, 55717, 55721, 55733, 55763, 55787, 55793, 55799,
+ 55807, 55813, 55817, 55819, 55823, 55829, 55837, 55843, 55849,
+ 55871, 55889, 55897, 55901, 55903, 55921, 55927, 55931, 55933,
+ 55949, 55967, 55987, 55997, 56003, 56009, 56039, 56041, 56053,
+ 56081, 56087, 56093, 56099, 56101, 56113, 56123, 56131, 56149,
+ 56167, 56171, 56179, 56197, 56207, 56209, 56237, 56239, 56249,
+ 56263, 56267, 56269, 56299, 56311, 56333, 56359, 56369, 56377,
+ 56383, 56393, 56401, 56417, 56431, 56437, 56443, 56453, 56467,
+ 56473, 56477, 56479, 56489, 56501, 56503, 56509, 56519, 56527,
+ 56531, 56533, 56543, 56569, 56591, 56597, 56599, 56611, 56629,
+ 56633, 56659, 56663, 56671, 56681, 56687, 56701, 56711, 56713,
+ 56731, 56737, 56747, 56767, 56773, 56779, 56783, 56807, 56809,
+ 56813, 56821, 56827, 56843, 56857, 56873, 56891, 56893, 56897,
+ 56909, 56911, 56921, 56923, 56929, 56941, 56951, 56957, 56963,
+ 56983, 56989, 56993, 56999, 57037, 57041, 57047, 57059, 57073,
+ 57077, 57089, 57097, 57107, 57119, 57131, 57139, 57143, 57149,
+ 57163, 57173, 57179, 57191, 57193, 57203, 57221, 57223, 57241,
+ 57251, 57259, 57269, 57271, 57283, 57287, 57301, 57329, 57331,
+ 57347, 57349, 57367, 57373, 57383, 57389, 57397, 57413, 57427,
+ 57457, 57467, 57487, 57493, 57503, 57527, 57529, 57557, 57559,
+ 57571, 57587, 57593, 57601, 57637, 57641, 57649, 57653, 57667,
+ 57679, 57689, 57697, 57709, 57713, 57719, 57727, 57731, 57737,
+ 57751, 57773, 57781, 57787, 57791, 57793, 57803, 57809, 57829,
+ 57839, 57847, 57853, 57859, 57881, 57899, 57901, 57917, 57923,
+ 57943, 57947, 57973, 57977, 57991, 58013, 58027, 58031, 58043,
+ 58049, 58057, 58061, 58067, 58073, 58099, 58109, 58111, 58129,
+ 58147, 58151, 58153, 58169, 58171, 58189, 58193, 58199, 58207,
+ 58211, 58217, 58229, 58231, 58237, 58243, 58271, 58309, 58313,
+ 58321, 58337, 58363, 58367, 58369, 58379, 58391, 58393, 58403,
+ 58411, 58417, 58427, 58439, 58441, 58451, 58453, 58477, 58481,
+ 58511, 58537, 58543, 58549, 58567, 58573, 58579, 58601, 58603,
+ 58613, 58631, 58657, 58661, 58679, 58687, 58693, 58699, 58711,
+ 58727, 58733, 58741, 58757, 58763, 58771, 58787, 58789, 58831,
+ 58889, 58897, 58901, 58907, 58909, 58913, 58921, 58937, 58943,
+ 58963, 58967, 58979, 58991, 58997, 59009, 59011, 59021, 59023,
+ 59029, 59051, 59053, 59063, 59069, 59077, 59083, 59093, 59107,
+ 59113, 59119, 59123, 59141, 59149, 59159, 59167, 59183, 59197,
+ 59207, 59209, 59219, 59221, 59233, 59239, 59243, 59263, 59273,
+ 59281, 59333, 59341, 59351, 59357, 59359, 59369, 59377, 59387,
+ 59393, 59399, 59407, 59417, 59419, 59441, 59443, 59447, 59453,
+ 59467, 59471, 59473, 59497, 59509, 59513, 59539, 59557, 59561,
+ 59567, 59581, 59611, 59617, 59621, 59627, 59629, 59651, 59659,
+ 59663, 59669, 59671, 59693, 59699, 59707, 59723, 59729, 59743,
+ 59747, 59753, 59771, 59779, 59791, 59797, 59809, 59833, 59863,
+ 59879, 59887, 59921, 59929, 59951, 59957, 59971, 59981, 59999,
+ 60013, 60017, 60029, 60037, 60041, 60077, 60083, 60089, 60091,
+ 60101, 60103, 60107, 60127, 60133, 60139, 60149, 60161, 60167,
+ 60169, 60209, 60217, 60223, 60251, 60257, 60259, 60271, 60289,
+ 60293, 60317, 60331, 60337, 60343, 60353, 60373, 60383, 60397,
+ 60413, 60427, 60443, 60449, 60457, 60493, 60497, 60509, 60521,
+ 60527, 60539, 60589, 60601, 60607, 60611, 60617, 60623, 60631,
+ 60637, 60647, 60649, 60659, 60661, 60679, 60689, 60703, 60719,
+ 60727, 60733, 60737, 60757, 60761, 60763, 60773, 60779, 60793,
+ 60811, 60821, 60859, 60869, 60887, 60889, 60899, 60901, 60913,
+ 60917, 60919, 60923, 60937, 60943, 60953, 60961, 61001, 61007,
+ 61027, 61031, 61043, 61051, 61057, 61091, 61099, 61121, 61129,
+ 61141, 61151, 61153, 61169, 61211, 61223, 61231, 61253, 61261,
+ 61283, 61291, 61297, 61331, 61333, 61339, 61343, 61357, 61363,
+ 61379, 61381, 61403, 61409, 61417, 61441, 61463, 61469, 61471,
+ 61483, 61487, 61493, 61507, 61511, 61519, 61543, 61547, 61553,
+ 61559, 61561, 61583, 61603, 61609, 61613, 61627, 61631, 61637,
+ 61643, 61651, 61657, 61667, 61673, 61681, 61687, 61703, 61717,
+ 61723, 61729, 61751, 61757, 61781, 61813, 61819, 61837, 61843,
+ 61861, 61871, 61879, 61909, 61927, 61933, 61949, 61961, 61967,
+ 61979, 61981, 61987, 61991, 62003, 62011, 62017, 62039, 62047,
+ 62053, 62057, 62071, 62081, 62099, 62119, 62129, 62131, 62137,
+ 62141, 62143, 62171, 62189, 62191, 62201, 62207, 62213, 62219,
+ 62233, 62273, 62297, 62299, 62303, 62311, 62323, 62327, 62347,
+ 62351, 62383, 62401, 62417, 62423, 62459, 62467, 62473, 62477,
+ 62483, 62497, 62501, 62507, 62533, 62539, 62549, 62563, 62581,
+ 62591, 62597, 62603, 62617, 62627, 62633, 62639, 62653, 62659,
+ 62683, 62687, 62701, 62723, 62731, 62743, 62753, 62761, 62773,
+ 62791, 62801, 62819, 62827, 62851, 62861, 62869, 62873, 62897,
+ 62903, 62921, 62927, 62929, 62939, 62969, 62971, 62981, 62983,
+ 62987, 62989, 63029, 63031, 63059, 63067, 63073, 63079, 63097,
+ 63103, 63113, 63127, 63131, 63149, 63179, 63197, 63199, 63211,
+ 63241, 63247, 63277, 63281, 63299, 63311, 63313, 63317, 63331,
+ 63337, 63347, 63353, 63361, 63367, 63377, 63389, 63391, 63397,
+ 63409, 63419, 63421, 63439, 63443, 63463, 63467, 63473, 63487,
+ 63493, 63499, 63521, 63527, 63533, 63541, 63559, 63577, 63587,
+ 63589, 63599, 63601, 63607, 63611, 63617, 63629, 63647, 63649,
+ 63659, 63667, 63671, 63689, 63691, 63697, 63703, 63709, 63719,
+ 63727, 63737, 63743, 63761, 63773, 63781, 63793, 63799, 63803,
+ 63809, 63823, 63839, 63841, 63853, 63857, 63863, 63901, 63907,
+ 63913, 63929, 63949, 63977, 63997, 64007, 64013, 64019, 64033,
+ 64037, 64063, 64067, 64081, 64091, 64109, 64123, 64151, 64153,
+ 64157, 64171, 64187, 64189, 64217, 64223, 64231, 64237, 64271,
+ 64279, 64283, 64301, 64303, 64319, 64327, 64333, 64373, 64381,
+ 64399, 64403, 64433, 64439, 64451, 64453, 64483, 64489, 64499,
+ 64513, 64553, 64567, 64577, 64579, 64591, 64601, 64609, 64613,
+ 64621, 64627, 64633, 64661, 64663, 64667, 64679, 64693, 64709,
+ 64717, 64747, 64763, 64781, 64783, 64793, 64811, 64817, 64849,
+ 64853, 64871, 64877, 64879, 64891, 64901, 64919, 64921, 64927,
+ 64937, 64951, 64969, 64997, 65003, 65011, 65027, 65029, 65033,
+ 65053, 65063, 65071, 65089, 65099, 65101, 65111, 65119, 65123,
+ 65129, 65141, 65147, 65167, 65171, 65173, 65179, 65183, 65203,
+ 65213, 65239, 65257, 65267, 65269, 65287, 65293, 65309, 65323,
+ 65327, 65353, 65357, 65371, 65381, 65393, 65407, 65413, 65419,
+ 65423, 65437, 65447, 65449, 65479, 65497, 65519, 65521, 65537,
+ 65539, 65543, 65551, 65557, 65563, 65579, 65581, 65587, 65599,
+ 65609, 65617, 65629, 65633, 65647, 65651, 65657, 65677, 65687,
+ 65699, 65701, 65707, 65713, 65717, 65719, 65729, 65731, 65761,
+ 65777, 65789, 65809, 65827, 65831, 65837, 65839, 65843, 65851,
+ 65867, 65881, 65899, 65921, 65927, 65929, 65951, 65957, 65963,
+ 65981, 65983, 65993, 66029, 66037, 66041, 66047, 66067, 66071,
+ 66083, 66089, 66103, 66107, 66109, 66137, 66161, 66169, 66173,
+ 66179, 66191, 66221, 66239, 66271, 66293, 66301, 66337, 66343,
+ 66347, 66359, 66361, 66373, 66377, 66383, 66403, 66413, 66431,
+ 66449, 66457, 66463, 66467, 66491, 66499, 66509, 66523, 66529,
+ 66533, 66541, 66553, 66569, 66571, 66587, 66593, 66601, 66617,
+ 66629, 66643, 66653, 66683, 66697, 66701, 66713, 66721, 66733,
+ 66739, 66749, 66751, 66763, 66791, 66797, 66809, 66821, 66841,
+ 66851, 66853, 66863, 66877, 66883, 66889, 66919, 66923, 66931,
+ 66943, 66947, 66949, 66959, 66973, 66977, 67003, 67021, 67033,
+ 67043, 67049, 67057, 67061, 67073, 67079, 67103, 67121, 67129,
+ 67139, 67141, 67153, 67157, 67169, 67181, 67187, 67189, 67211,
+ 67213, 67217, 67219, 67231, 67247, 67261, 67271, 67273, 67289,
+ 67307, 67339, 67343, 67349, 67369, 67391, 67399, 67409, 67411,
+ 67421, 67427, 67429, 67433, 67447, 67453, 67477, 67481, 67489,
+ 67493, 67499, 67511, 67523, 67531, 67537, 67547, 67559, 67567,
+ 67577, 67579, 67589, 67601, 67607, 67619, 67631, 67651, 67679,
+ 67699, 67709, 67723, 67733, 67741, 67751, 67757, 67759, 67763,
+ 67777, 67783, 67789, 67801, 67807, 67819, 67829, 67843, 67853,
+ 67867, 67883, 67891, 67901, 67927, 67931, 67933, 67939, 67943,
+ 67957, 67961, 67967, 67979, 67987, 67993, 68023, 68041, 68053,
+ 68059, 68071, 68087, 68099, 68111, 68113, 68141, 68147, 68161,
+ 68171, 68207, 68209, 68213, 68219, 68227, 68239, 68261, 68279,
+ 68281, 68311, 68329, 68351, 68371, 68389, 68399, 68437, 68443,
+ 68447, 68449, 68473, 68477, 68483, 68489, 68491, 68501, 68507,
+ 68521, 68531, 68539, 68543, 68567, 68581, 68597, 68611, 68633,
+ 68639, 68659, 68669, 68683, 68687, 68699, 68711, 68713, 68729,
+ 68737, 68743, 68749, 68767, 68771, 68777, 68791, 68813, 68819,
+ 68821, 68863, 68879, 68881, 68891, 68897, 68899, 68903, 68909,
+ 68917, 68927, 68947, 68963, 68993, 69001, 69011, 69019, 69029,
+ 69031, 69061, 69067, 69073, 69109, 69119, 69127, 69143, 69149,
+ 69151, 69163, 69191, 69193, 69197, 69203, 69221, 69233, 69239,
+ 69247, 69257, 69259, 69263, 69313, 69317, 69337, 69341, 69371,
+ 69379, 69383, 69389, 69401, 69403, 69427, 69431, 69439, 69457,
+ 69463, 69467, 69473, 69481, 69491, 69493, 69497, 69499, 69539,
+ 69557, 69593, 69623, 69653, 69661, 69677, 69691, 69697, 69709,
+ 69737, 69739, 69761, 69763, 69767, 69779, 69809, 69821, 69827,
+ 69829, 69833, 69847, 69857, 69859, 69877, 69899, 69911, 69929,
+ 69931, 69941, 69959, 69991, 69997, 70001, 70003, 70009, 70019,
+ 70039, 70051, 70061, 70067, 70079, 70099, 70111, 70117, 70121,
+ 70123, 70139, 70141, 70157, 70163, 70177, 70181, 70183, 70199,
+ 70201, 70207, 70223, 70229, 70237, 70241, 70249, 70271, 70289,
+ 70297, 70309, 70313, 70321, 70327, 70351, 70373, 70379, 70381,
+ 70393, 70423, 70429, 70439, 70451, 70457, 70459, 70481, 70487,
+ 70489, 70501, 70507, 70529, 70537, 70549, 70571, 70573, 70583,
+ 70589, 70607, 70619, 70621, 70627, 70639, 70657, 70663, 70667,
+ 70687, 70709, 70717, 70729, 70753, 70769, 70783, 70793, 70823,
+ 70841, 70843, 70849, 70853, 70867, 70877, 70879, 70891, 70901,
+ 70913, 70919, 70921, 70937, 70949, 70951, 70957, 70969, 70979,
+ 70981, 70991, 70997, 70999, 71011, 71023, 71039, 71059, 71069,
+ 71081, 71089, 71119, 71129, 71143, 71147, 71153, 71161, 71167,
+ 71171, 71191, 71209, 71233, 71237, 71249, 71257, 71261, 71263,
+ 71287, 71293, 71317, 71327, 71329, 71333, 71339, 71341, 71347,
+ 71353, 71359, 71363, 71387, 71389, 71399, 71411, 71413, 71419,
+ 71429, 71437, 71443, 71453, 71471, 71473, 71479, 71483, 71503,
+ 71527, 71537, 71549, 71551, 71563, 71569, 71593, 71597, 71633,
+ 71647, 71663, 71671, 71693, 71699, 71707, 71711, 71713, 71719,
+ 71741, 71761, 71777, 71789, 71807, 71809, 71821, 71837, 71843,
+ 71849, 71861, 71867, 71879, 71881, 71887, 71899, 71909, 71917,
+ 71933, 71941, 71947, 71963, 71971, 71983, 71987, 71993, 71999,
+ 72019, 72031, 72043, 72047, 72053, 72073, 72077, 72089, 72091,
+ 72101, 72103, 72109, 72139, 72161, 72167, 72169, 72173, 72211,
+ 72221, 72223, 72227, 72229, 72251, 72253, 72269, 72271, 72277,
+ 72287, 72307, 72313, 72337, 72341, 72353, 72367, 72379, 72383,
+ 72421, 72431, 72461, 72467, 72469, 72481, 72493, 72497, 72503,
+ 72533, 72547, 72551, 72559, 72577, 72613, 72617, 72623, 72643,
+ 72647, 72649, 72661, 72671, 72673, 72679, 72689, 72701, 72707,
+ 72719, 72727, 72733, 72739, 72763, 72767, 72797, 72817, 72823,
+ 72859, 72869, 72871, 72883, 72889, 72893, 72901, 72907, 72911,
+ 72923, 72931, 72937, 72949, 72953, 72959, 72973, 72977, 72997,
+ 73009, 73013, 73019, 73037, 73039, 73043, 73061, 73063, 73079,
+ 73091, 73121, 73127, 73133, 73141, 73181, 73189, 73237, 73243,
+ 73259, 73277, 73291, 73303, 73309, 73327, 73331, 73351, 73361,
+ 73363, 73369, 73379, 73387, 73417, 73421, 73433, 73453, 73459,
+ 73471, 73477, 73483, 73517, 73523, 73529, 73547, 73553, 73561,
+ 73571, 73583, 73589, 73597, 73607, 73609, 73613, 73637, 73643,
+ 73651, 73673, 73679, 73681, 73693, 73699, 73709, 73721, 73727,
+ 73751, 73757, 73771, 73783, 73819, 73823, 73847, 73849, 73859,
+ 73867, 73877, 73883, 73897, 73907, 73939, 73943, 73951, 73961,
+ 73973, 73999, 74017, 74021, 74027, 74047, 74051, 74071, 74077,
+ 74093, 74099, 74101, 74131, 74143, 74149, 74159, 74161, 74167,
+ 74177, 74189, 74197, 74201, 74203, 74209, 74219, 74231, 74257,
+ 74279, 74287, 74293, 74297, 74311, 74317, 74323, 74353, 74357,
+ 74363, 74377, 74381, 74383, 74411, 74413, 74419, 74441, 74449,
+ 74453, 74471, 74489, 74507, 74509, 74521, 74527, 74531, 74551,
+ 74561, 74567, 74573, 74587, 74597, 74609, 74611, 74623, 74653,
+ 74687, 74699, 74707, 74713, 74717, 74719, 74729, 74731, 74747,
+ 74759, 74761, 74771, 74779, 74797, 74821, 74827, 74831, 74843,
+ 74857, 74861, 74869, 74873, 74887, 74891, 74897, 74903, 74923,
+ 74929, 74933, 74941, 74959, 75011, 75013, 75017, 75029, 75037,
+ 75041, 75079, 75083, 75109, 75133, 75149, 75161, 75167, 75169,
+ 75181, 75193, 75209, 75211, 75217, 75223, 75227, 75239, 75253,
+ 75269, 75277, 75289, 75307, 75323, 75329, 75337, 75347, 75353,
+ 75367, 75377, 75389, 75391, 75401, 75403, 75407, 75431, 75437,
+ 75479, 75503, 75511, 75521, 75527, 75533, 75539, 75541, 75553,
+ 75557, 75571, 75577, 75583, 75611, 75617, 75619, 75629, 75641,
+ 75653, 75659, 75679, 75683, 75689, 75703, 75707, 75709, 75721,
+ 75731, 75743, 75767, 75773, 75781, 75787, 75793, 75797, 75821,
+ 75833, 75853, 75869, 75883, 75913, 75931, 75937, 75941, 75967,
+ 75979, 75983, 75989, 75991, 75997, 76001, 76003, 76031, 76039,
+ 76079, 76081, 76091, 76099, 76103, 76123, 76129, 76147, 76157,
+ 76159, 76163, 76207, 76213, 76231, 76243, 76249, 76253, 76259,
+ 76261, 76283, 76289, 76303, 76333, 76343, 76367, 76369, 76379,
+ 76387, 76403, 76421, 76423, 76441, 76463, 76471, 76481, 76487,
+ 76493, 76507, 76511, 76519, 76537, 76541, 76543, 76561, 76579,
+ 76597, 76603, 76607, 76631, 76649, 76651, 76667, 76673, 76679,
+ 76697, 76717, 76733, 76753, 76757, 76771, 76777, 76781, 76801,
+ 76819, 76829, 76831, 76837, 76847, 76871, 76873, 76883, 76907,
+ 76913, 76919, 76943, 76949, 76961, 76963, 76991, 77003, 77017,
+ 77023, 77029, 77041, 77047, 77069, 77081, 77093, 77101, 77137,
+ 77141, 77153, 77167, 77171, 77191, 77201, 77213, 77237, 77239,
+ 77243, 77249, 77261, 77263, 77267, 77269, 77279, 77291, 77317,
+ 77323, 77339, 77347, 77351, 77359, 77369, 77377, 77383, 77417,
+ 77419, 77431, 77447, 77471, 77477, 77479, 77489, 77491, 77509,
+ 77513, 77521, 77527, 77543, 77549, 77551, 77557, 77563, 77569,
+ 77573, 77587, 77591, 77611, 77617, 77621, 77641, 77647, 77659,
+ 77681, 77687, 77689, 77699, 77711, 77713, 77719, 77723, 77731,
+ 77743, 77747, 77761, 77773, 77783, 77797, 77801, 77813, 77839,
+ 77849, 77863, 77867, 77893, 77899, 77929, 77933, 77951, 77969,
+ 77977, 77983, 77999, 78007, 78017, 78031, 78041, 78049, 78059,
+ 78079, 78101, 78121, 78137, 78139, 78157, 78163, 78167, 78173,
+ 78179, 78191, 78193, 78203, 78229, 78233, 78241, 78259, 78277,
+ 78283, 78301, 78307, 78311, 78317, 78341, 78347, 78367, 78401,
+ 78427, 78437, 78439, 78467, 78479, 78487, 78497, 78509, 78511,
+ 78517, 78539, 78541, 78553, 78569, 78571, 78577, 78583, 78593,
+ 78607, 78623, 78643, 78649, 78653, 78691, 78697, 78707, 78713,
+ 78721, 78737, 78779, 78781, 78787, 78791, 78797, 78803, 78809,
+ 78823, 78839, 78853, 78857, 78877, 78887, 78889, 78893, 78901,
+ 78919, 78929, 78941, 78977, 78979, 78989, 79031, 79039, 79043,
+ 79063, 79087, 79103, 79111, 79133, 79139, 79147, 79151, 79153,
+ 79159, 79181, 79187, 79193, 79201, 79229, 79231, 79241, 79259,
+ 79273, 79279, 79283, 79301, 79309, 79319, 79333, 79337, 79349,
+ 79357, 79367, 79379, 79393, 79397, 79399, 79411, 79423, 79427,
+ 79433, 79451, 79481, 79493, 79531, 79537, 79549, 79559, 79561,
+ 79579, 79589, 79601, 79609, 79613, 79621, 79627, 79631, 79633,
+ 79657, 79669, 79687, 79691, 79693, 79697, 79699, 79757, 79769,
+ 79777, 79801, 79811, 79813, 79817, 79823, 79829, 79841, 79843,
+ 79847, 79861, 79867, 79873, 79889, 79901, 79903, 79907, 79939,
+ 79943, 79967, 79973, 79979, 79987, 79997, 79999, 80021, 80039,
+ 80051, 80071, 80077, 80107, 80111, 80141, 80147, 80149, 80153,
+ 80167, 80173, 80177, 80191, 80207, 80209, 80221, 80231, 80233,
+ 80239, 80251, 80263, 80273, 80279, 80287, 80309, 80317, 80329,
+ 80341, 80347, 80363, 80369, 80387, 80407, 80429, 80447, 80449,
+ 80471, 80473, 80489, 80491, 80513, 80527, 80537, 80557, 80567,
+ 80599, 80603, 80611, 80621, 80627, 80629, 80651, 80657, 80669,
+ 80671, 80677, 80681, 80683, 80687, 80701, 80713, 80737, 80747,
+ 80749, 80761, 80777, 80779, 80783, 80789, 80803, 80809, 80819,
+ 80831, 80833, 80849, 80863, 80897, 80909, 80911, 80917, 80923,
+ 80929, 80933, 80953, 80963, 80989, 81001, 81013, 81017, 81019,
+ 81023, 81031, 81041, 81043, 81047, 81049, 81071, 81077, 81083,
+ 81097, 81101, 81119, 81131, 81157, 81163, 81173, 81181, 81197,
+ 81199, 81203, 81223, 81233, 81239, 81281, 81283, 81293, 81299,
+ 81307, 81331, 81343, 81349, 81353, 81359, 81371, 81373, 81401,
+ 81409, 81421, 81439, 81457, 81463, 81509, 81517, 81527, 81533,
+ 81547, 81551, 81553, 81559, 81563, 81569, 81611, 81619, 81629,
+ 81637, 81647, 81649, 81667, 81671, 81677, 81689, 81701, 81703,
+ 81707, 81727, 81737, 81749, 81761, 81769, 81773, 81799, 81817,
+ 81839, 81847, 81853, 81869, 81883, 81899, 81901, 81919, 81929,
+ 81931, 81937, 81943, 81953, 81967, 81971, 81973, 82003, 82007,
+ 82009, 82013, 82021, 82031, 82037, 82039, 82051, 82067, 82073,
+ 82129, 82139, 82141, 82153, 82163, 82171, 82183, 82189, 82193,
+ 82207, 82217, 82219, 82223, 82231, 82237, 82241, 82261, 82267,
+ 82279, 82301, 82307, 82339, 82349, 82351, 82361, 82373, 82387,
+ 82393, 82421, 82457, 82463, 82469, 82471, 82483, 82487, 82493,
+ 82499, 82507, 82529, 82531, 82549, 82559, 82561, 82567, 82571,
+ 82591, 82601, 82609, 82613, 82619, 82633, 82651, 82657, 82699,
+ 82721, 82723, 82727, 82729, 82757, 82759, 82763, 82781, 82787,
+ 82793, 82799, 82811, 82813, 82837, 82847, 82883, 82889, 82891,
+ 82903, 82913, 82939, 82963, 82981, 82997, 83003, 83009, 83023,
+ 83047, 83059, 83063, 83071, 83077, 83089, 83093, 83101, 83117,
+ 83137, 83177, 83203, 83207, 83219, 83221, 83227, 83231, 83233,
+ 83243, 83257, 83267, 83269, 83273, 83299, 83311, 83339, 83341,
+ 83357, 83383, 83389, 83399, 83401, 83407, 83417, 83423, 83431,
+ 83437, 83443, 83449, 83459, 83471, 83477, 83497, 83537, 83557,
+ 83561, 83563, 83579, 83591, 83597, 83609, 83617, 83621, 83639,
+ 83641, 83653, 83663, 83689, 83701, 83717, 83719, 83737, 83761,
+ 83773, 83777, 83791, 83813, 83833, 83843, 83857, 83869, 83873,
+ 83891, 83903, 83911, 83921, 83933, 83939, 83969, 83983, 83987,
+ 84011, 84017, 84047, 84053, 84059, 84061, 84067, 84089, 84121,
+ 84127, 84131, 84137, 84143, 84163, 84179, 84181, 84191, 84199,
+ 84211, 84221, 84223, 84229, 84239, 84247, 84263, 84299, 84307,
+ 84313, 84317, 84319, 84347, 84349, 84377, 84389, 84391, 84401,
+ 84407, 84421, 84431, 84437, 84443, 84449, 84457, 84463, 84467,
+ 84481, 84499, 84503, 84509, 84521, 84523, 84533, 84551, 84559,
+ 84589, 84629, 84631, 84649, 84653, 84659, 84673, 84691, 84697,
+ 84701, 84713, 84719, 84731, 84737, 84751, 84761, 84787, 84793,
+ 84809, 84811, 84827, 84857, 84859, 84869, 84871, 84913, 84919,
+ 84947, 84961, 84967, 84977, 84979, 84991, 85009, 85021, 85027,
+ 85037, 85049, 85061, 85081, 85087, 85091, 85093, 85103, 85109,
+ 85121, 85133, 85147, 85159, 85193, 85199, 85201, 85213, 85223,
+ 85229, 85237, 85243, 85247, 85259, 85297, 85303, 85313, 85331,
+ 85333, 85361, 85363, 85369, 85381, 85411, 85427, 85429, 85439,
+ 85447, 85451, 85453, 85469, 85487, 85513, 85517, 85523, 85531,
+ 85549, 85571, 85577, 85597, 85601, 85607, 85619, 85621, 85627,
+ 85639, 85643, 85661, 85667, 85669, 85691, 85703, 85711, 85717,
+ 85733, 85751, 85781, 85793, 85817, 85819, 85829, 85831, 85837,
+ 85843, 85847, 85853, 85889, 85903, 85909, 85931, 85933, 85991,
+ 85999, 86011, 86017, 86027, 86029, 86069, 86077, 86083, 86111,
+ 86113, 86117, 86131, 86137, 86143, 86161, 86171, 86179, 86183,
+ 86197, 86201, 86209, 86239, 86243, 86249, 86257, 86263, 86269,
+ 86287, 86291, 86293, 86297, 86311, 86323, 86341, 86351, 86353,
+ 86357, 86369, 86371, 86381, 86389, 86399, 86413, 86423, 86441,
+ 86453, 86461, 86467, 86477, 86491, 86501, 86509, 86531, 86533,
+ 86539, 86561, 86573, 86579, 86587, 86599, 86627, 86629, 86677,
+ 86689, 86693, 86711, 86719, 86729, 86743, 86753, 86767, 86771,
+ 86783, 86813, 86837, 86843, 86851, 86857, 86861, 86869, 86923,
+ 86927, 86929, 86939, 86951, 86959, 86969, 86981, 86993, 87011,
+ 87013, 87037, 87041, 87049, 87071, 87083, 87103, 87107, 87119,
+ 87121, 87133, 87149, 87151, 87179, 87181, 87187, 87211, 87221,
+ 87223, 87251, 87253, 87257, 87277, 87281, 87293, 87299, 87313,
+ 87317, 87323, 87337, 87359, 87383, 87403, 87407, 87421, 87427,
+ 87433, 87443, 87473, 87481, 87491, 87509, 87511, 87517, 87523,
+ 87539, 87541, 87547, 87553, 87557, 87559, 87583, 87587, 87589,
+ 87613, 87623, 87629, 87631, 87641, 87643, 87649, 87671, 87679,
+ 87683, 87691, 87697, 87701, 87719, 87721, 87739, 87743, 87751,
+ 87767, 87793, 87797, 87803, 87811, 87833, 87853, 87869, 87877,
+ 87881, 87887, 87911, 87917, 87931, 87943, 87959, 87961, 87973,
+ 87977, 87991, 88001, 88003, 88007, 88019, 88037, 88069, 88079,
+ 88093, 88117, 88129, 88169, 88177, 88211, 88223, 88237, 88241,
+ 88259, 88261, 88289, 88301, 88321, 88327, 88337, 88339, 88379,
+ 88397, 88411, 88423, 88427, 88463, 88469, 88471, 88493, 88499,
+ 88513, 88523, 88547, 88589, 88591, 88607, 88609, 88643, 88651,
+ 88657, 88661, 88663, 88667, 88681, 88721, 88729, 88741, 88747,
+ 88771, 88789, 88793, 88799, 88801, 88807, 88811, 88813, 88817,
+ 88819, 88843, 88853, 88861, 88867, 88873, 88883, 88897, 88903,
+ 88919, 88937, 88951, 88969, 88993, 88997, 89003, 89009, 89017,
+ 89021, 89041, 89051, 89057, 89069, 89071, 89083, 89087, 89101,
+ 89107, 89113, 89119, 89123, 89137, 89153, 89189, 89203, 89209,
+ 89213, 89227, 89231, 89237, 89261, 89269, 89273, 89293, 89303,
+ 89317, 89329, 89363, 89371, 89381, 89387, 89393, 89399, 89413,
+ 89417, 89431, 89443, 89449, 89459, 89477, 89491, 89501, 89513,
+ 89519, 89521, 89527, 89533, 89561, 89563, 89567, 89591, 89597,
+ 89599, 89603, 89611, 89627, 89633, 89653, 89657, 89659, 89669,
+ 89671, 89681, 89689, 89753, 89759, 89767, 89779, 89783, 89797,
+ 89809, 89819, 89821, 89833, 89839, 89849, 89867, 89891, 89897,
+ 89899, 89909, 89917, 89923, 89939, 89959, 89963, 89977, 89983,
+ 89989, 90001, 90007, 90011, 90017, 90019, 90023, 90031, 90053,
+ 90059, 90067, 90071, 90073, 90089, 90107, 90121, 90127, 90149,
+ 90163, 90173, 90187, 90191, 90197, 90199, 90203, 90217, 90227,
+ 90239, 90247, 90263, 90271, 90281, 90289, 90313, 90353, 90359,
+ 90371, 90373, 90379, 90397, 90401, 90403, 90407, 90437, 90439,
+ 90469, 90473, 90481, 90499, 90511, 90523, 90527, 90529, 90533,
+ 90547, 90583, 90599, 90617, 90619, 90631, 90641, 90647, 90659,
+ 90677, 90679, 90697, 90703, 90709, 90731, 90749, 90787, 90793,
+ 90803, 90821, 90823, 90833, 90841, 90847, 90863, 90887, 90901,
+ 90907, 90911, 90917, 90931, 90947, 90971, 90977, 90989, 90997,
+ 91009, 91019, 91033, 91079, 91081, 91097, 91099, 91121, 91127,
+ 91129, 91139, 91141, 91151, 91153, 91159, 91163, 91183, 91193,
+ 91199, 91229, 91237, 91243, 91249, 91253, 91283, 91291, 91297,
+ 91303, 91309, 91331, 91367, 91369, 91373, 91381, 91387, 91393,
+ 91397, 91411, 91423, 91433, 91453, 91457, 91459, 91463, 91493,
+ 91499, 91513, 91529, 91541, 91571, 91573, 91577, 91583, 91591,
+ 91621, 91631, 91639, 91673, 91691, 91703, 91711, 91733, 91753,
+ 91757, 91771, 91781, 91801, 91807, 91811, 91813, 91823, 91837,
+ 91841, 91867, 91873, 91909, 91921, 91939, 91943, 91951, 91957,
+ 91961, 91967, 91969, 91997, 92003, 92009, 92033, 92041, 92051,
+ 92077, 92083, 92107, 92111, 92119, 92143, 92153, 92173, 92177,
+ 92179, 92189, 92203, 92219, 92221, 92227, 92233, 92237, 92243,
+ 92251, 92269, 92297, 92311, 92317, 92333, 92347, 92353, 92357,
+ 92363, 92369, 92377, 92381, 92383, 92387, 92399, 92401, 92413,
+ 92419, 92431, 92459, 92461, 92467, 92479, 92489, 92503, 92507,
+ 92551, 92557, 92567, 92569, 92581, 92593, 92623, 92627, 92639,
+ 92641, 92647, 92657, 92669, 92671, 92681, 92683, 92693, 92699,
+ 92707, 92717, 92723, 92737, 92753, 92761, 92767, 92779, 92789,
+ 92791, 92801, 92809, 92821, 92831, 92849, 92857, 92861, 92863,
+ 92867, 92893, 92899, 92921, 92927, 92941, 92951, 92957, 92959,
+ 92987, 92993, 93001, 93047, 93053, 93059, 93077, 93083, 93089,
+ 93097, 93103, 93113, 93131, 93133, 93139, 93151, 93169, 93179,
+ 93187, 93199, 93229, 93239, 93241, 93251, 93253, 93257, 93263,
+ 93281, 93283, 93287, 93307, 93319, 93323, 93329, 93337, 93371,
+ 93377, 93383, 93407, 93419, 93427, 93463, 93479, 93481, 93487,
+ 93491, 93493, 93497, 93503, 93523, 93529, 93553, 93557, 93559,
+ 93563, 93581, 93601, 93607, 93629, 93637, 93683, 93701, 93703,
+ 93719, 93739, 93761, 93763, 93787, 93809, 93811, 93827, 93851,
+ 93871, 93887, 93889, 93893, 93901, 93911, 93913, 93923, 93937,
+ 93941, 93949, 93967, 93971, 93979, 93983, 93997, 94007, 94009,
+ 94033, 94049, 94057, 94063, 94079, 94099, 94109, 94111, 94117,
+ 94121, 94151, 94153, 94169, 94201, 94207, 94219, 94229, 94253,
+ 94261, 94273, 94291, 94307, 94309, 94321, 94327, 94331, 94343,
+ 94349, 94351, 94379, 94397, 94399, 94421, 94427, 94433, 94439,
+ 94441, 94447, 94463, 94477, 94483, 94513, 94529, 94531, 94541,
+ 94543, 94547, 94559, 94561, 94573, 94583, 94597, 94603, 94613,
+ 94621, 94649, 94651, 94687, 94693, 94709, 94723, 94727, 94747,
+ 94771, 94777, 94781, 94789, 94793, 94811, 94819, 94823, 94837,
+ 94841, 94847, 94849, 94873, 94889, 94903, 94907, 94933, 94949,
+ 94951, 94961, 94993, 94999, 95003, 95009, 95021, 95027, 95063,
+ 95071, 95083, 95087, 95089, 95093, 95101, 95107, 95111, 95131,
+ 95143, 95153, 95177, 95189, 95191, 95203, 95213, 95219, 95231,
+ 95233, 95239, 95257, 95261, 95267, 95273, 95279, 95287, 95311,
+ 95317, 95327, 95339, 95369, 95383, 95393, 95401, 95413, 95419,
+ 95429, 95441, 95443, 95461, 95467, 95471, 95479, 95483, 95507,
+ 95527, 95531, 95539, 95549, 95561, 95569, 95581, 95597, 95603,
+ 95617, 95621, 95629, 95633, 95651, 95701, 95707, 95713, 95717,
+ 95723, 95731, 95737, 95747, 95773, 95783, 95789, 95791, 95801,
+ 95803, 95813, 95819, 95857, 95869, 95873, 95881, 95891, 95911,
+ 95917, 95923, 95929, 95947, 95957, 95959, 95971, 95987, 95989,
+ 96001, 96013, 96017, 96043, 96053, 96059, 96079, 96097, 96137,
+ 96149, 96157, 96167, 96179, 96181, 96199, 96211, 96221, 96223,
+ 96233, 96259, 96263, 96269, 96281, 96289, 96293, 96323, 96329,
+ 96331, 96337, 96353, 96377, 96401, 96419, 96431, 96443, 96451,
+ 96457, 96461, 96469, 96479, 96487, 96493, 96497, 96517, 96527,
+ 96553, 96557, 96581, 96587, 96589, 96601, 96643, 96661, 96667,
+ 96671, 96697, 96703, 96731, 96737, 96739, 96749, 96757, 96763,
+ 96769, 96779, 96787, 96797, 96799, 96821, 96823, 96827, 96847,
+ 96851, 96857, 96893, 96907, 96911, 96931, 96953, 96959, 96973,
+ 96979, 96989, 96997, 97001, 97003, 97007, 97021, 97039, 97073,
+ 97081, 97103, 97117, 97127, 97151, 97157, 97159, 97169, 97171,
+ 97177, 97187, 97213, 97231, 97241, 97259, 97283, 97301, 97303,
+ 97327, 97367, 97369, 97373, 97379, 97381, 97387, 97397, 97423,
+ 97429, 97441, 97453, 97459, 97463, 97499, 97501, 97511, 97523,
+ 97547, 97549, 97553, 97561, 97571, 97577, 97579, 97583, 97607,
+ 97609, 97613, 97649, 97651, 97673, 97687, 97711, 97729, 97771,
+ 97777, 97787, 97789, 97813, 97829, 97841, 97843, 97847, 97849,
+ 97859, 97861, 97871, 97879, 97883, 97919, 97927, 97931, 97943,
+ 97961, 97967, 97973, 97987, 98009, 98011, 98017, 98041, 98047,
+ 98057, 98081, 98101, 98123, 98129, 98143, 98179, 98207, 98213,
+ 98221, 98227, 98251, 98257, 98269, 98297, 98299, 98317, 98321,
+ 98323, 98327, 98347, 98369, 98377, 98387, 98389, 98407, 98411,
+ 98419, 98429, 98443, 98453, 98459, 98467, 98473, 98479, 98491,
+ 98507, 98519, 98533, 98543, 98561, 98563, 98573, 98597, 98621,
+ 98627, 98639, 98641, 98663, 98669, 98689, 98711, 98713, 98717,
+ 98729, 98731, 98737, 98773, 98779, 98801, 98807, 98809, 98837,
+ 98849, 98867, 98869, 98873, 98887, 98893, 98897, 98899, 98909,
+ 98911, 98927, 98929, 98939, 98947, 98953, 98963, 98981, 98993,
+ 98999, 99013, 99017, 99023, 99041, 99053, 99079, 99083, 99089,
+ 99103, 99109, 99119, 99131, 99133, 99137, 99139, 99149, 99173,
+ 99181, 99191, 99223, 99233, 99241, 99251, 99257, 99259, 99277,
+ 99289, 99317, 99347, 99349, 99367, 99371, 99377, 99391, 99397,
+ 99401, 99409, 99431, 99439, 99469, 99487, 99497, 99523, 99527,
+ 99529, 99551, 99559, 99563, 99571, 99577, 99581, 99607, 99611,
+ 99623, 99643, 99661, 99667, 99679, 99689, 99707, 99709, 99713,
+ 99719, 99721, 99733, 99761, 99767, 99787, 99793, 99809, 99817,
+ 99823, 99829, 99833, 99839, 99859, 99871, 99877, 99881, 99901,
+ 99907, 99923, 99929, 99961, 99971, 99989, 99991,
+ };
diff --git a/src/rt/isaac/rand.h b/src/rt/isaac/rand.h
new file mode 100644
index 00000000..018496f6
--- /dev/null
+++ b/src/rt/isaac/rand.h
@@ -0,0 +1,56 @@
+/*
+------------------------------------------------------------------------------
+rand.h: definitions for a random number generator
+By Bob Jenkins, 1996, Public Domain
+MODIFIED:
+ 960327: Creation (addition of randinit, really)
+ 970719: use context, not global variables, for internal state
+ 980324: renamed seed to flag
+ 980605: recommend RANDSIZL=4 for noncryptography.
+ 010626: note this is public domain
+------------------------------------------------------------------------------
+*/
+#ifndef STANDARD
+#include "standard.h"
+#endif
+
+#ifndef RAND
+#define RAND
+#define RANDSIZL (8) /* I recommend 8 for crypto, 4 for simulations */
+#define RANDSIZ (1<<RANDSIZL)
+
+/* context of random number generator */
/* Context (full internal state) of one ISAAC generator instance.
   All state is per-context; no globals are used. */
struct randctx
{
  ub4 randcnt;           /* how many results in randrsl[] remain unused */
  ub4 randrsl[RANDSIZ];  /* the batch of results handed out to callers */
  ub4 randmem[RANDSIZ];  /* internal state array */
  ub4 randa;             /* accumulator */
  ub4 randb;             /* previous result */
  ub4 randc;             /* counter, bumped once per batch of results */
};
typedef  struct randctx  randctx;
+
+/*
+------------------------------------------------------------------------------
+ If (flag==TRUE), then use the contents of randrsl[0..RANDSIZ-1] as the seed.
+------------------------------------------------------------------------------
+*/
+void randinit(randctx *r, word flag);
+
+void isaac(randctx *r);
+
+
+/*
+------------------------------------------------------------------------------
+ Call rand(/o_ randctx *r _o/) to retrieve a single 32-bit random value
+------------------------------------------------------------------------------
+*/
+#define rand(r) \
+ (!(r)->randcnt-- ? \
+ (isaac(r), (r)->randcnt=RANDSIZ-1, (r)->randrsl[(r)->randcnt]) : \
+ (r)->randrsl[(r)->randcnt])
+
+#endif /* RAND */
+
+
diff --git a/src/rt/isaac/randport.cpp b/src/rt/isaac/randport.cpp
new file mode 100644
index 00000000..45ec590d
--- /dev/null
+++ b/src/rt/isaac/randport.cpp
@@ -0,0 +1,134 @@
+/*
+------------------------------------------------------------------------------
+rand.c: By Bob Jenkins. My random number generator, ISAAC. Public Domain
+MODIFIED:
+ 960327: Creation (addition of randinit, really)
+ 970719: use context, not global variables, for internal state
+ 980324: make a portable version
+ 010626: Note this is public domain
+------------------------------------------------------------------------------
+*/
+#ifndef STANDARD
+#include "standard.h"
+#endif
+#ifndef RAND
+#include "rand.h"
+#endif
+
+
+#define ind(mm,x) ((mm)[(x>>2)&(RANDSIZ-1)])
+#define rngstep(mix,a,b,mm,m,m2,r,x) \
+{ \
+ x = *m; \
+ a = ((a^(mix)) + *(m2++)) & 0xffffffff; \
+ *(m++) = y = (ind(mm,x) + a + b) & 0xffffffff; \
+ *(r++) = b = (ind(mm,y>>RANDSIZL) + x) & 0xffffffff; \
+}
+
/* Generate the next batch of RANDSIZ results into ctx->randrsl.
   Walks randmem in two half-array passes, mixing the accumulator (a),
   the previous result (b) and the once-per-batch counter (c) via the
   rngstep macro.  The "& 0xffffffff" masks keep values to 32 bits on
   platforms where ub4 is wider than 4 bytes (see standard.h). */
void isaac(randctx *ctx)
{
   register ub4 a,b,x,y,*m,*mm,*m2,*r,*mend;
   mm=ctx->randmem; r=ctx->randrsl;
   /* c increments once per refill; folded into b to vary each batch */
   a = ctx->randa; b = (ctx->randb + (++ctx->randc)) & 0xffffffff;
   /* first pass: m walks the low half while m2 walks the high half */
   for (m = mm, mend = m2 = m+(RANDSIZ/2); m<mend; )
   {
      rngstep( a<<13, a, b, mm, m, m2, r, x);
      rngstep( a>>6 , a, b, mm, m, m2, r, x);
      rngstep( a<<2 , a, b, mm, m, m2, r, x);
      rngstep( a>>16, a, b, mm, m, m2, r, x);
   }
   /* second pass: roles of the two halves are swapped */
   for (m2 = mm; m2<mend; )
   {
      rngstep( a<<13, a, b, mm, m, m2, r, x);
      rngstep( a>>6 , a, b, mm, m, m2, r, x);
      rngstep( a<<2 , a, b, mm, m, m2, r, x);
      rngstep( a>>16, a, b, mm, m, m2, r, x);
   }
   ctx->randb = b; ctx->randa = a;
}
+
+
+#define mix(a,b,c,d,e,f,g,h) \
+{ \
+ a^=b<<11; d+=a; b+=c; \
+ b^=c>>2; e+=b; c+=d; \
+ c^=d<<8; f+=c; d+=e; \
+ d^=e>>16; g+=d; e+=f; \
+ e^=f<<10; h+=e; f+=g; \
+ f^=g>>4; a+=f; g+=h; \
+ g^=h<<8; b+=g; h+=a; \
+ h^=a>>9; c+=h; a+=b; \
+}
+
+/* if (flag==TRUE), then use the contents of randrsl[] to initialize mm[]. */
/* Initialize a generator context.  If flag==TRUE, the caller-supplied
   seed in ctx->randrsl[0..RANDSIZ-1] is folded into the state (two
   passes, so every seed word affects all of randmem); otherwise the
   state is filled from fixed constants only, so every unseeded run
   produces the identical sequence. */
void randinit(randctx *ctx, word flag)
{
   word i;
   ub4 a,b,c,d,e,f,g,h;
   ub4 *m,*r;
   ctx->randa = ctx->randb = ctx->randc = 0;
   m=ctx->randmem;
   r=ctx->randrsl;
   a=b=c=d=e=f=g=h=0x9e3779b9;  /* the golden ratio */

   for (i=0; i<4; ++i)          /* scramble it */
   {
     mix(a,b,c,d,e,f,g,h);
   }

   if (flag)
   {
     /* initialize using the contents of r[] as the seed */
     for (i=0; i<RANDSIZ; i+=8)
     {
       a+=r[i  ]; b+=r[i+1]; c+=r[i+2]; d+=r[i+3];
       e+=r[i+4]; f+=r[i+5]; g+=r[i+6]; h+=r[i+7];
       mix(a,b,c,d,e,f,g,h);
       m[i  ]=a; m[i+1]=b; m[i+2]=c; m[i+3]=d;
       m[i+4]=e; m[i+5]=f; m[i+6]=g; m[i+7]=h;
     }
     /* do a second pass to make all of the seed affect all of m */
     for (i=0; i<RANDSIZ; i+=8)
     {
       a+=m[i  ]; b+=m[i+1]; c+=m[i+2]; d+=m[i+3];
       e+=m[i+4]; f+=m[i+5]; g+=m[i+6]; h+=m[i+7];
       mix(a,b,c,d,e,f,g,h);
       m[i  ]=a; m[i+1]=b; m[i+2]=c; m[i+3]=d;
       m[i+4]=e; m[i+5]=f; m[i+6]=g; m[i+7]=h;
     }
   }
   else
   {
     for (i=0; i<RANDSIZ; i+=8)
     {
       /* fill in mm[] with messy stuff */
       mix(a,b,c,d,e,f,g,h);
       m[i  ]=a; m[i+1]=b; m[i+2]=c; m[i+3]=d;
       m[i+4]=e; m[i+5]=f; m[i+6]=g; m[i+7]=h;
     }
   }

   isaac(ctx);            /* fill in the first set of results */
   ctx->randcnt=RANDSIZ;  /* prepare to use the first set of results */
}
+
+
#ifdef NEVER
/* Stand-alone self-test (compiled out in the Rust runtime): prints
   the first 512 outputs for an all-zero seed, 8 values per line, for
   comparison against the published ISAAC reference vectors. */
int main()
{
  ub4 i,j;
  randctx ctx;
  ctx.randa=ctx.randb=ctx.randc=(ub4)0;
  /* all-zero seed */
  for (i=0; i<256; ++i) ctx.randrsl[i]=(ub4)0;
  randinit(&ctx, TRUE);
  for (i=0; i<2; ++i)
  {
    isaac(&ctx);
    for (j=0; j<256; ++j)
    {
      printf("%.8lx",ctx.randrsl[j]);
      if ((j&7)==7) printf("\n");
    }
  }
}
#endif
diff --git a/src/rt/isaac/standard.h b/src/rt/isaac/standard.h
new file mode 100644
index 00000000..202a5d65
--- /dev/null
+++ b/src/rt/isaac/standard.h
@@ -0,0 +1,57 @@
+/*
+------------------------------------------------------------------------------
+Standard definitions and types, Bob Jenkins
+------------------------------------------------------------------------------
+*/
+#ifndef STANDARD
+# define STANDARD
+# ifndef STDIO
+# include <stdio.h>
+# define STDIO
+# endif
+# ifndef STDDEF
+# include <stddef.h>
+# define STDDEF
+# endif
typedef unsigned long long ub8;  /* unsigned 8-byte quantities */
#define UB8MAXVAL 0xffffffffffffffffLL
#define UB8BITS 64
typedef signed long long sb8;    /* signed 8-byte quantities */
#define SB8MAXVAL 0x7fffffffffffffffLL
/* NOTE(review): 'unsigned long int' is 8 bytes on LP64 platforms, so
   ub4 may be wider than its name implies; the ISAAC code masks with
   0xffffffff to compensate.  <stdint.h> uint32_t would be the exact
   type, but this header predates C99 -- confirm before changing. */
typedef unsigned long int ub4;   /* unsigned 4-byte quantities */
#define UB4MAXVAL 0xffffffff
typedef signed long int sb4;     /* signed 4-byte quantities */
#define UB4BITS 32
#define SB4MAXVAL 0x7fffffff
typedef unsigned short int ub2;  /* unsigned 2-byte quantities */
#define UB2MAXVAL 0xffff
#define UB2BITS 16
typedef signed short int sb2;    /* signed 2-byte quantities */
#define SB2MAXVAL 0x7fff
typedef unsigned char ub1;       /* unsigned 1-byte quantities */
#define UB1MAXVAL 0xff
#define UB1BITS 8
typedef signed char sb1;         /* signed 1-byte quantities */
#define SB1MAXVAL 0x7f
typedef int word;                /* fastest type available */

#define bis(target,mask) ((target) |= (mask))   /* bit set */
#define bic(target,mask) ((target) &= ~(mask))  /* bit clear */
#define bit(target,mask) ((target) & (mask))    /* bit test */
#ifndef min
# define min(a,b) (((a)<(b)) ? (a) : (b))
#endif /* min */
#ifndef max
# define max(a,b) (((a)<(b)) ? (b) : (a))
#endif /* max */
/* NOTE(review): the (ub4) cast in align() truncates pointers where
   ub4 is narrower than a pointer -- verify call sites before use. */
#ifndef align
# define align(a) (((ub4)a+(sizeof(void *)-1))&(~(sizeof(void *)-1)))
#endif /* align */
#ifndef abs
# define abs(a)   (((a)>0) ? (a) : -(a))
#endif
#define TRUE  1
#define FALSE 0
#define SUCCESS 0  /* 1 on VAX */
+#endif /* STANDARD */
diff --git a/src/rt/memcheck.h b/src/rt/memcheck.h
new file mode 100644
index 00000000..fc50dabf
--- /dev/null
+++ b/src/rt/memcheck.h
@@ -0,0 +1,309 @@
+
+/*
+ ----------------------------------------------------------------
+
+ Notice that the following BSD-style license applies to this one
+ file (memcheck.h) only. The rest of Valgrind is licensed under the
+ terms of the GNU General Public License, version 2, unless
+ otherwise indicated. See the COPYING file in the source
+ distribution for details.
+
+ ----------------------------------------------------------------
+
+ This file is part of MemCheck, a heavyweight Valgrind tool for
+ detecting memory errors.
+
+ Copyright (C) 2000-2009 Julian Seward. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. The origin of this software must not be misrepresented; you must
+ not claim that you wrote the original software. If you use this
+ software in a product, an acknowledgment in the product
+ documentation would be appreciated but is not required.
+
+ 3. Altered source versions must be plainly marked as such, and must
+ not be misrepresented as being the original software.
+
+ 4. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ----------------------------------------------------------------
+
+ Notice that the above BSD-style license applies to this one file
+ (memcheck.h) only. The entire rest of Valgrind is licensed under
+ the terms of the GNU General Public License, version 2. See the
+ COPYING file in the source distribution for details.
+
+ ----------------------------------------------------------------
+*/
+
+
+#ifndef __MEMCHECK_H
+#define __MEMCHECK_H
+
+
+/* This file is for inclusion into client (your!) code.
+
+ You can use these macros to manipulate and query memory permissions
+ inside your own programs.
+
+ See comment near the top of valgrind.h on how to use them.
+*/
+
+#include "valgrind.h"
+
+/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
+ This enum comprises an ABI exported by Valgrind to programs
+ which use client requests. DO NOT CHANGE THE ORDER OF THESE
+ ENTRIES, NOR DELETE ANY -- add new ones at the end. */
/* Client-request codes understood by the Memcheck tool.  The numeric
   values are a stable ABI exported by Valgrind (see the warning
   above): never reorder or delete entries, only append. */
typedef
   enum {
      VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE('M','C'),
      VG_USERREQ__MAKE_MEM_UNDEFINED,
      VG_USERREQ__MAKE_MEM_DEFINED,
      VG_USERREQ__DISCARD,
      VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,
      VG_USERREQ__CHECK_MEM_IS_DEFINED,
      VG_USERREQ__DO_LEAK_CHECK,
      VG_USERREQ__COUNT_LEAKS,

      VG_USERREQ__GET_VBITS,
      VG_USERREQ__SET_VBITS,

      VG_USERREQ__CREATE_BLOCK,

      VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE,

      /* Not next to VG_USERREQ__COUNT_LEAKS because it was added later. */
      VG_USERREQ__COUNT_LEAK_BLOCKS,

      /* This is just for memcheck's internal use - don't use it */
      _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR
         = VG_USERREQ_TOOL_BASE('M','C') + 256
   } Vg_MemCheckClientRequest;
+
+
+
+/* Client-code macros to manipulate the state of memory. */
+
+/* Mark memory at _qzz_addr as unaddressable for _qzz_len bytes. */
+#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr,_qzz_len) \
+ (__extension__({unsigned long _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
+ VG_USERREQ__MAKE_MEM_NOACCESS, \
+ _qzz_addr, _qzz_len, 0, 0, 0); \
+ _qzz_res; \
+ }))
+
+/* Similarly, mark memory at _qzz_addr as addressable but undefined
+ for _qzz_len bytes. */
+#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len) \
+ (__extension__({unsigned long _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
+ VG_USERREQ__MAKE_MEM_UNDEFINED, \
+ _qzz_addr, _qzz_len, 0, 0, 0); \
+ _qzz_res; \
+ }))
+
+/* Similarly, mark memory at _qzz_addr as addressable and defined
+ for _qzz_len bytes. */
+#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr,_qzz_len) \
+ (__extension__({unsigned long _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
+ VG_USERREQ__MAKE_MEM_DEFINED, \
+ _qzz_addr, _qzz_len, 0, 0, 0); \
+ _qzz_res; \
+ }))
+
+/* Similar to VALGRIND_MAKE_MEM_DEFINED except that addressability is
+ not altered: bytes which are addressable are marked as defined,
+ but those which are not addressable are left unchanged. */
+#define VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(_qzz_addr,_qzz_len) \
+ (__extension__({unsigned long _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
+ VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, \
+ _qzz_addr, _qzz_len, 0, 0, 0); \
+ _qzz_res; \
+ }))
+
+/* Create a block-description handle. The description is an ascii
+ string which is included in any messages pertaining to addresses
+ within the specified memory range. Has no other effect on the
+ properties of the memory range. */
+#define VALGRIND_CREATE_BLOCK(_qzz_addr,_qzz_len, _qzz_desc) \
+ (__extension__({unsigned long _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
+ VG_USERREQ__CREATE_BLOCK, \
+ _qzz_addr, _qzz_len, _qzz_desc, \
+ 0, 0); \
+ _qzz_res; \
+ }))
+
+/* Discard a block-description-handle. Returns 1 for an
+ invalid handle, 0 for a valid handle. */
+#define VALGRIND_DISCARD(_qzz_blkindex) \
+ (__extension__ ({unsigned long _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
+ VG_USERREQ__DISCARD, \
+ 0, _qzz_blkindex, 0, 0, 0); \
+ _qzz_res; \
+ }))
+
+
+/* Client-code macros to check the state of memory. */
+
+/* Check that memory at _qzz_addr is addressable for _qzz_len bytes.
+ If suitable addressibility is not established, Valgrind prints an
+ error message and returns the address of the first offending byte.
+ Otherwise it returns zero. */
+#define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(_qzz_addr,_qzz_len) \
+ (__extension__({unsigned long _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,\
+ _qzz_addr, _qzz_len, 0, 0, 0); \
+ _qzz_res; \
+ }))
+
+/* Check that memory at _qzz_addr is addressable and defined for
+ _qzz_len bytes. If suitable addressibility and definedness are not
+ established, Valgrind prints an error message and returns the
+ address of the first offending byte. Otherwise it returns zero. */
+#define VALGRIND_CHECK_MEM_IS_DEFINED(_qzz_addr,_qzz_len) \
+ (__extension__({unsigned long _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__CHECK_MEM_IS_DEFINED, \
+ _qzz_addr, _qzz_len, 0, 0, 0); \
+ _qzz_res; \
+ }))
+
+/* Use this macro to force the definedness and addressibility of an
+ lvalue to be checked. If suitable addressibility and definedness
+ are not established, Valgrind prints an error message and returns
+ the address of the first offending byte. Otherwise it returns
+ zero. */
+#define VALGRIND_CHECK_VALUE_IS_DEFINED(__lvalue) \
+ VALGRIND_CHECK_MEM_IS_DEFINED( \
+ (volatile unsigned char *)&(__lvalue), \
+ (unsigned long)(sizeof (__lvalue)))
+
+
+/* Do a full memory leak check (like --leak-check=full) mid-execution. */
+#define VALGRIND_DO_LEAK_CHECK \
+ {unsigned long _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__DO_LEAK_CHECK, \
+ 0, 0, 0, 0, 0); \
+ }
+
+/* Do a summary memory leak check (like --leak-check=summary) mid-execution. */
+#define VALGRIND_DO_QUICK_LEAK_CHECK \
+ {unsigned long _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__DO_LEAK_CHECK, \
+ 1, 0, 0, 0, 0); \
+ }
+
+/* Return number of leaked, dubious, reachable and suppressed bytes found by
+ all previous leak checks. They must be lvalues. */
+#define VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed) \
+ /* For safety on 64-bit platforms we assign the results to private
+ unsigned long variables, then assign these to the lvalues the user
+ specified, which works no matter what type 'leaked', 'dubious', etc
+ are. We also initialise '_qzz_leaked', etc because
+ VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
+ defined. */ \
+ {unsigned long _qzz_res; \
+ unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \
+ unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__COUNT_LEAKS, \
+ &_qzz_leaked, &_qzz_dubious, \
+ &_qzz_reachable, &_qzz_suppressed, 0); \
+ leaked = _qzz_leaked; \
+ dubious = _qzz_dubious; \
+ reachable = _qzz_reachable; \
+ suppressed = _qzz_suppressed; \
+ }
+
+/* Return number of leaked, dubious, reachable and suppressed bytes found by
+ all previous leak checks. They must be lvalues. */
+#define VALGRIND_COUNT_LEAK_BLOCKS(leaked, dubious, reachable, suppressed) \
+ /* For safety on 64-bit platforms we assign the results to private
+ unsigned long variables, then assign these to the lvalues the user
+ specified, which works no matter what type 'leaked', 'dubious', etc
+ are. We also initialise '_qzz_leaked', etc because
+ VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
+ defined. */ \
+ {unsigned long _qzz_res; \
+ unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \
+ unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__COUNT_LEAK_BLOCKS, \
+ &_qzz_leaked, &_qzz_dubious, \
+ &_qzz_reachable, &_qzz_suppressed, 0); \
+ leaked = _qzz_leaked; \
+ dubious = _qzz_dubious; \
+ reachable = _qzz_reachable; \
+ suppressed = _qzz_suppressed; \
+ }
+
+
+/* Get the validity data for addresses [zza..zza+zznbytes-1] and copy it
+ into the provided zzvbits array. Return values:
+ 0 if not running on valgrind
+ 1 success
+ 2 [previously indicated unaligned arrays; these are now allowed]
+ 3 if any parts of zzsrc/zzvbits are not addressable.
+ The metadata is not copied in cases 0, 2 or 3 so it should be
+ impossible to segfault your system by using this call.
+*/
+#define VALGRIND_GET_VBITS(zza,zzvbits,zznbytes) \
+ (__extension__({unsigned long _qzz_res; \
+ char* czza = (char*)zza; \
+ char* czzvbits = (char*)zzvbits; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__GET_VBITS, \
+ czza, czzvbits, zznbytes, 0, 0 ); \
+ _qzz_res; \
+ }))
+
+/* Set the validity data for addresses [zza..zza+zznbytes-1], copying it
+ from the provided zzvbits array. Return values:
+ 0 if not running on valgrind
+ 1 success
+ 2 [previously indicated unaligned arrays; these are now allowed]
+ 3 if any parts of zza/zzvbits are not addressable.
+ The metadata is not copied in cases 0, 2 or 3 so it should be
+ impossible to segfault your system by using this call.
+*/
+#define VALGRIND_SET_VBITS(zza,zzvbits,zznbytes) \
+ (__extension__({unsigned int _qzz_res; \
+ char* czza = (char*)zza; \
+ char* czzvbits = (char*)zzvbits; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__SET_VBITS, \
+ czza, czzvbits, zznbytes, 0, 0 ); \
+ _qzz_res; \
+ }))
+
+#endif
+
diff --git a/src/rt/rust.cpp b/src/rt/rust.cpp
new file mode 100644
index 00000000..8c725bfb
--- /dev/null
+++ b/src/rt/rust.cpp
@@ -0,0 +1,267 @@
+#include "rust_internal.h"
+#include "util/array_list.h"
+
+
+// #define TRACK_ALLOCATIONS
+// For debugging, keeps track of live allocations, so you can find out
+// exactly what leaked.
+
+#ifdef TRACK_ALLOCATIONS
+array_list<void *> allocation_list;
+#endif
+
// A fresh service object starts with no outstanding allocations.
rust_srv::rust_srv() :
    live_allocs(0)
{
}
+
// On shutdown, verify that every allocation made through this service
// object was freed.  If any remain, report the count (and, when
// TRACK_ALLOCATIONS is enabled, each leaked pointer) and abort via
// fatal().
rust_srv::~rust_srv()
{
    if (live_allocs != 0) {
        char msg[128];
        snprintf(msg, sizeof(msg),
                 "leaked memory in rust main loop (%" PRIuPTR " objects)",
                 live_allocs);
#ifdef TRACK_ALLOCATIONS
        // Dump every slot still holding a live pointer.
        for (size_t i = 0; i < allocation_list.size(); i++) {
            if (allocation_list[i] != NULL) {
                printf("allocation 0x%" PRIxPTR " was not freed\n",
                       (uintptr_t) allocation_list[i]);
            }
        }
#endif
        fatal(msg, __FILE__, __LINE__);
    }
}
+
+void
+rust_srv::log(char const *str)
+{
+ printf("rt: %s\n", str);
+}
+
+
+
+void *
+rust_srv::malloc(size_t bytes)
+{
+ ++live_allocs;
+ void * val = ::malloc(bytes);
+#ifdef TRACK_ALLOCATIONS
+ allocation_list.append(val);
+#endif
+ return val;
+}
+
+void *
+rust_srv::realloc(void *p, size_t bytes)
+{
+ if (!p) {
+ live_allocs++;
+ }
+ void * val = ::realloc(p, bytes);
+#ifdef TRACK_ALLOCATIONS
+ if (allocation_list.replace(p, val) == NULL) {
+ fatal("not in allocation_list", __FILE__, __LINE__);
+ }
+#endif
+ return val;
+}
+
// Release memory obtained from rust_srv::malloc/realloc, decrementing
// the live-allocation count; aborts if the count would go negative.
// NOTE(review): free(NULL) would still decrement live_allocs and skew
// the leak count -- confirm callers never pass NULL.
void
rust_srv::free(void *p)
{
    if (live_allocs < 1) {
        fatal("live_allocs < 1", __FILE__, __LINE__);
    }
    live_allocs--;
    ::free(p);
#ifdef TRACK_ALLOCATIONS
    // Clear the tracking slot; aborts if p was never tracked.
    if (allocation_list.replace(p, NULL) == NULL) {
        fatal("not in allocation_list", __FILE__, __LINE__);
    }
#endif
}
+
// Report an unrecoverable runtime error ("'expr' failed" at
// file:line) through log(), then terminate the whole process.
void
rust_srv::fatal(char const *expr, char const *file, size_t line)
{
    char buf[1024];
    snprintf(buf, sizeof(buf),
             "fatal, '%s' failed, %s:%d",
             expr, file, (int)line);
    log(buf);
    exit(1);
}
+
// Make a fresh service object (e.g. for a new domain).  Allocation
// counts are per-instance, so the clone starts at zero live allocs.
rust_srv *
rust_srv::clone()
{
    return new rust_srv();
}
+
+
// Scheduler loop for one domain: repeatedly pick a runnable task,
// switch into it, and reap any tasks that died, until the scheduler
// has no runnable task left.  Returns the domain's exit value.
int
rust_main_loop(rust_dom *dom)
{
    // Make sure someone is watching, to pull us out of infinite loops.
    rust_timer timer(*dom);

    int rval;
    rust_task *task;

    dom->log(rust_log::DOM,
             "running main-loop on domain 0x%" PRIxPTR, dom);
    dom->logptr("exit-task glue",
                dom->root_crate->get_exit_task_glue());

    while ((task = dom->sched()) != NULL) {
        I(dom, task->running());

        dom->log(rust_log::TASK,
                 "activating task 0x%" PRIxPTR ", sp=0x%" PRIxPTR,
                 (uintptr_t)task, task->rust_sp);

        // Clear any pending interrupt request before switching in.
        dom->interrupt_flag = 0;

        // Context-switch into the task; returns when it yields/dies.
        dom->activate(task);

        dom->log(rust_log::TASK,
                 "returned from task 0x%" PRIxPTR
                 " in state '%s', sp=0x%" PRIxPTR,
                 (uintptr_t)task,
                 dom->state_vec_name(task->state),
                 task->rust_sp);

        // Sanity: the task's saved stack pointer lies within its stack.
        I(dom, task->rust_sp >= (uintptr_t) &task->stk->data[0]);
        I(dom, task->rust_sp < task->stk->limit);

        dom->reap_dead_tasks();
    }

    dom->log(rust_log::DOM, "finished main-loop (dom.rval = %d)", dom->rval);
    rval = dom->rval;

    return rval;
}
+
+
+struct
+command_line_args
+{
+ rust_dom &dom;
+ int argc;
+ char **argv;
+
+ // vec[str] passed to rust_task::start.
+ rust_vec *args;
+
+ command_line_args(rust_dom &dom,
+ int sys_argc,
+ char **sys_argv)
+ : dom(dom),
+ argc(sys_argc),
+ argv(sys_argv),
+ args(NULL)
+ {
+#if defined(__WIN32__)
+ LPCWSTR cmdline = GetCommandLineW();
+ LPWSTR *wargv = CommandLineToArgvW(cmdline, &argc);
+ dom.win32_require("CommandLineToArgvW", argv != NULL);
+ argv = (char **) dom.malloc(sizeof(char*) * argc);
+ for (int i = 0; i < argc; ++i) {
+ int n_chars = WideCharToMultiByte(CP_UTF8, 0, wargv[i], -1,
+ NULL, 0, NULL, NULL);
+ dom.win32_require("WideCharToMultiByte(0)", n_chars != 0);
+ argv[i] = (char *) dom.malloc(n_chars);
+ n_chars = WideCharToMultiByte(CP_UTF8, 0, wargv[i], -1,
+ argv[i], n_chars, NULL, NULL);
+ dom.win32_require("WideCharToMultiByte(1)", n_chars != 0);
+ }
+ LocalFree(wargv);
+#endif
+ size_t vec_fill = sizeof(rust_str *) * argc;
+ size_t vec_alloc = next_power_of_two(sizeof(rust_vec) + vec_fill);
+ void *mem = dom.malloc(vec_alloc);
+ args = new (mem) rust_vec(&dom, vec_alloc, 0, NULL);
+ rust_str **strs = (rust_str**) &args->data[0];
+ for (int i = 0; i < argc; ++i) {
+ size_t str_fill = strlen(argv[i]) + 1;
+ size_t str_alloc = next_power_of_two(sizeof(rust_str) + str_fill);
+ mem = dom.malloc(str_alloc);
+ strs[i] = new (mem) rust_str(&dom, str_alloc, str_fill,
+ (uint8_t const *)argv[i]);
+ }
+ args->fill = vec_fill;
+ // If the caller has a declared args array, they may drop; but
+ // we don't know if they have such an array. So we pin the args
+ // array here to ensure it survives to program-shutdown.
+ args->ref();
+ }
+
+ ~command_line_args() {
+ if (args) {
+ // Drop the args we've had pinned here.
+ rust_str **strs = (rust_str**) &args->data[0];
+ for (int i = 0; i < argc; ++i)
+ dom.free(strs[i]);
+ dom.free(args);
+ }
+
+#ifdef __WIN32__
+ for (int i = 0; i < argc; ++i) {
+ dom.free(argv[i]);
+ }
+ dom.free(argv);
+#endif
+ }
+};
+
+
// C entry point called by compiled rust programs: set up a service
// object, a domain and the argument vector, start the root task at
// 'main_fn', then run the scheduler loop to completion.
extern "C" CDECL int
rust_start(uintptr_t main_fn, rust_crate const *crate, int argc, char **argv)
{
    int ret;
    {
        // Inner scope: srv/dom/args must be destroyed before the
        // pthread_exit below.
        rust_srv srv;
        rust_dom dom(&srv, crate);
        command_line_args args(dom, argc, argv);

        dom.log(rust_log::DOM, "startup: %d args", args.argc);
        for (int i = 0; i < args.argc; ++i)
            dom.log(rust_log::DOM,
                    "startup: arg[%d] = '%s'", i, args.argv[i]);

        if (dom._log.is_tracing(rust_log::DWARF)) {
            // Constructed for its side effect: parsing the crate logs
            // its DWARF contents when tracing is on.
            rust_crate_reader rdr(&dom, crate);
        }

        // Root-task argument frame: two zero words, then the vec[str].
        uintptr_t main_args[3] = { 0, 0, (uintptr_t)args.args };

        dom.root_task->start(crate->get_exit_task_glue(),
                             main_fn,
                             (uintptr_t)&main_args,
                             sizeof(main_args));

        ret = rust_main_loop(&dom);
    }

#if !defined(__WIN32__)
    // Don't take down the process if the main thread exits without an
    // error.
    if (!ret)
        pthread_exit(NULL);
#endif
    return ret;
}
+
+//
+// Local Variables:
+// mode: C++
+// fill-column: 78;
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// compile-command: "make -k -C .. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
+// End:
+//
diff --git a/src/rt/rust.h b/src/rt/rust.h
new file mode 100644
index 00000000..135a1799
--- /dev/null
+++ b/src/rt/rust.h
@@ -0,0 +1,49 @@
+#ifndef RUST_H
+#define RUST_H
+
+/*
+ * Include this file after you've defined the ISO C9x stdint
+ * types (size_t, uint8_t, uintptr_t, etc.)
+ */
+
+#ifdef __i386__
+// 'cdecl' ABI only means anything on i386
+#ifdef __WIN32__
+#define CDECL __cdecl
+#else
+#define CDECL __attribute__((cdecl))
+#endif
+#else
+#define CDECL
+#endif
+
// Service object: the runtime's interface to its host environment
// (logging, heap allocation, fatal-error reporting).  Tracks the
// number of live allocations so leaks can be detected at shutdown.
// All entry points are virtual so an embedder can substitute its own
// implementation.
struct rust_srv {
    size_t live_allocs;  // count of outstanding allocations

    virtual void log(char const *);
    virtual void fatal(char const *, char const *, size_t);
    virtual void *malloc(size_t);
    virtual void *realloc(void *, size_t);
    virtual void free(void *);
    virtual rust_srv *clone();

    rust_srv();
    virtual ~rust_srv();
};
+
// Placement-style operator new drawing memory from a rust_srv
// allocator: used as 'new (srv) T(...)'.  Memory obtained this way
// must be released with srv->free(), not 'delete'.
inline void *operator new(size_t size, rust_srv *srv)
{
    return srv->malloc(size);
}
+
+/*
+ * Local Variables:
+ * fill-column: 78;
+ * indent-tabs-mode: nil
+ * c-basic-offset: 4
+ * buffer-file-coding-system: utf-8-unix
+ * compile-command: "make -k -C .. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
+ * End:
+ */
+
+#endif /* RUST_H */
diff --git a/src/rt/rust_builtin.cpp b/src/rt/rust_builtin.cpp
new file mode 100644
index 00000000..71aa644b
--- /dev/null
+++ b/src/rt/rust_builtin.cpp
@@ -0,0 +1,129 @@
+
+#include "rust_internal.h"
+
+/* Native builtins. */
// Native builtin: allocate an empty rust string with capacity for
// n_bytes of payload (rounded up to a power of two).  On OOM the task
// is failed and NULL is returned.
extern "C" CDECL rust_str*
str_alloc(rust_task *task, size_t n_bytes)
{
    rust_dom *dom = task->dom;
    size_t alloc = next_power_of_two(sizeof(rust_str) + n_bytes);
    void *mem = dom->malloc(alloc);
    if (!mem) {
        task->fail(2);
        return NULL;
    }
    // fill == 1: the string initially holds just its NUL terminator.
    rust_str *st = new (mem) rust_str(dom, alloc, 1, (uint8_t const *)"");
    return st;
}
+
// Native builtin: return the last OS error (GetLastError on Windows,
// errno elsewhere) as a freshly allocated rust string.  On any
// failure along the way, fails the task and returns NULL.
extern "C" CDECL rust_str*
last_os_error(rust_task *task) {
    rust_dom *dom = task->dom;
    dom->log(rust_log::TASK, "last_os_error()");

#if defined(__WIN32__)
    // Let the system allocate the message buffer; freed below.
    LPTSTR buf;
    DWORD err = GetLastError();
    DWORD res = FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                              FORMAT_MESSAGE_FROM_SYSTEM |
                              FORMAT_MESSAGE_IGNORE_INSERTS,
                              NULL, err,
                              MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
                              (LPTSTR) &buf, 0, NULL);
    if (!res) {
        task->fail(1);
        return NULL;
    }
#elif defined(_GNU_SOURCE)
    // GNU strerror_r: may return a pointer to a static message rather
    // than filling cbuf; NULL indicates failure.
    char cbuf[1024];
    char *buf = strerror_r(errno, cbuf, sizeof(cbuf));
    if (!buf) {
        task->fail(1);
        return NULL;
    }
#else
    // POSIX strerror_r: fills buf and returns 0 on success.
    char buf[1024];
    int err = strerror_r(errno, buf, sizeof(buf));
    if (err) {
        task->fail(1);
        return NULL;
    }
#endif
    // NOTE(review): on __WIN32__ builds with UNICODE defined, LPTSTR
    // is a wide string and strlen/rust_str below would misread it --
    // confirm this is only compiled with the ANSI FormatMessage.
    size_t fill = strlen(buf) + 1;
    size_t alloc = next_power_of_two(sizeof(rust_str) + fill);
    void *mem = dom->malloc(alloc);
    if (!mem) {
        task->fail(1);
        return NULL;
    }
    rust_str *st = new (mem) rust_str(dom, alloc, fill, (const uint8_t *)buf);

#ifdef __WIN32__
    // Release the system-allocated message buffer.
    LocalFree((HLOCAL)buf);
#endif
    return st;
}
+
// Native builtin: byte size of the type described by 't'.
extern "C" CDECL size_t
size_of(rust_task *task, type_desc *t) {
    return t->size;
}
+
// Native builtin: alignment requirement of the type described by 't'.
extern "C" CDECL size_t
align_of(rust_task *task, type_desc *t) {
    return t->align;
}
+
// Native builtin: reference count of a boxed value ('v' points at its
// refcount word), corrected for the temporary reference taken to make
// this very call.
extern "C" CDECL size_t
refcount(rust_task *task, type_desc *t, size_t *v) {
    // Passed-in value has refcount 1 too high
    // because it was ref'ed while making the call.
    return (*v) - 1;
}
+
+extern "C" CDECL rust_vec*
+vec_alloc(rust_task *task, type_desc *t, size_t n_elts)
+{
+ rust_dom *dom = task->dom;
+ dom->log(rust_log::MEM,
+ "vec_alloc %" PRIdPTR " elements of size %" PRIdPTR,
+ n_elts, t->size);
+ size_t fill = n_elts * t->size;
+ size_t alloc = next_power_of_two(sizeof(rust_vec) + fill);
+ void *mem = dom->malloc(alloc);
+ if (!mem) {
+ task->fail(3);
+ return NULL;
+ }
+ rust_vec *vec = new (mem) rust_vec(dom, alloc, 0, NULL);
+ return vec;
+}
+
// Native builtin: pointer to a string's NUL-terminated byte data.
extern "C" CDECL char const *
str_buf(rust_task *task, rust_str *s)
{
    return (char const *)&s->data[0];
}
+
// Native builtin: pointer to a vector's raw element storage.
extern "C" CDECL void *
vec_buf(rust_task *task, type_desc *ty, rust_vec *v)
{
    return (void *)&v->data[0];
}
+
// Native builtin: a vector's fill.
// NOTE(review): 'fill' appears to be a byte count, not an element
// count (see vec_alloc: fill = n_elts * t->size) -- confirm callers
// divide by the element size.
extern "C" CDECL size_t
vec_len(rust_task *task, type_desc *ty, rust_vec *v)
{
    return v->fill;
}
+
+//
+// Local Variables:
+// mode: C++
+// fill-column: 78;
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// compile-command: "make -k -C .. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
+// End:
+//
diff --git a/src/rt/rust_chan.cpp b/src/rt/rust_chan.cpp
new file mode 100644
index 00000000..38f93a7d
--- /dev/null
+++ b/src/rt/rust_chan.cpp
@@ -0,0 +1,34 @@
+
+#include "rust_internal.h"
+#include "rust_chan.h"
+
+// Constructs a channel bound to `port` and registers it with the port's
+// channel list.
+// NOTE(review): the initializer list reads port->unit_sz unconditionally,
+// so despite the null check below, a NULL port would crash here — confirm
+// callers never pass NULL.
+rust_chan::rust_chan(rust_task *task, rust_port *port) :
+    task(task),
+    port(port),
+    buffer(task->dom, port->unit_sz),
+    token(this)
+{
+    if (port)
+        port->chans.push(this);
+}
+
+// Withdraws any pending send token and unregisters from the port (if the
+// channel is still associated with one).
+rust_chan::~rust_chan()
+{
+    if (port) {
+        if (token.pending())
+            token.withdraw();
+        port->chans.swapdel(this);
+    }
+}
+
+// Detach this channel from its port; called when the port goes away.
+void
+rust_chan::disassociate()
+{
+    I(task->dom, port);
+
+    if (token.pending())
+        token.withdraw();
+
+    // Delete reference to the port.
+    port = NULL;
+}
diff --git a/src/rt/rust_chan.h b/src/rt/rust_chan.h
new file mode 100644
index 00000000..a56ba0ca
--- /dev/null
+++ b/src/rt/rust_chan.h
@@ -0,0 +1,22 @@
+
+#ifndef RUST_CHAN_H
+#define RUST_CHAN_H
+
+// A channel is the sending endpoint of a task-to-task communication pipe;
+// it buffers outgoing data and holds a token used to signal the receiving
+// port that data is pending.
+class rust_chan : public rc_base<rust_chan>, public task_owned<rust_chan> {
+public:
+    rust_chan(rust_task *task, rust_port *port);
+    ~rust_chan();
+
+    rust_task *task;   // Task that owns this channel.
+    rust_port *port;   // Destination port; NULL after disassociate().
+    circ_buf buffer;   // Data buffered for transmission, in port unit sizes.
+    size_t idx; // Index into port->chans.
+
+    // Token belonging to this chan, it will be placed into a port's
+    // writers vector if we have something to send to the port.
+    rust_token token;
+
+    // Detach from the port: withdraws any pending token and clears `port`.
+    void disassociate();
+};
+
+#endif /* RUST_CHAN_H */
diff --git a/src/rt/rust_comm.cpp b/src/rt/rust_comm.cpp
new file mode 100644
index 00000000..58b9ef4c
--- /dev/null
+++ b/src/rt/rust_comm.cpp
@@ -0,0 +1,199 @@
+
+#include "rust_internal.h"
+
+template class ptr_vec<rust_token>;
+template class ptr_vec<rust_alarm>;
+template class ptr_vec<rust_chan>;
+
+// An alarm simply records the task to be woken.
+rust_alarm::rust_alarm(rust_task *receiver) :
+    receiver(receiver)
+{
+}
+
+
+// Circular buffers.
+
+// A FIFO byte buffer operating on fixed-size units.  `next` is the read
+// offset and `unread` the number of live bytes; both are byte counts that
+// stay multiples of unit_sz.  Storage starts zeroed via calloc.
+circ_buf::circ_buf(rust_dom *dom, size_t unit_sz) :
+    dom(dom),
+    alloc(INIT_CIRC_BUF_UNITS * unit_sz),
+    unit_sz(unit_sz),
+    next(0),
+    unread(0),
+    data((uint8_t *)dom->calloc(alloc))
+{
+    I(dom, unit_sz);
+    dom->log(rust_log::MEM|rust_log::COMM,
+             "new circ_buf(alloc=%d, unread=%d) -> circ_buf=0x%" PRIxPTR,
+             alloc, unread, this);
+    I(dom, data);
+}
+
+// Frees the backing storage.  The unread==0 check is deliberately left
+// disabled: buffers may be torn down with data still queued.
+circ_buf::~circ_buf()
+{
+    dom->log(rust_log::MEM|rust_log::COMM,
+             "~circ_buf 0x%" PRIxPTR,
+             this);
+    I(dom, data);
+    // I(dom, unread == 0);
+    dom->free(data);
+}
+
+// Copy all unread data, in FIFO order, into the flat buffer `dst`,
+// wrapping reads around the end of the circular storage.  Used when
+// growing or shrinking the backing allocation; callers must reset `next`
+// to 0 after swapping in the compacted buffer.
+void
+circ_buf::transfer(void *dst)
+{
+    size_t i;
+    uint8_t *d = (uint8_t *)dst;
+    I(dom, dst);
+    for (i = 0; i < unread; i += unit_sz)
+        // Fix: the modulus must apply to (next + i), not to i alone.
+        // The previous `next + i % alloc` indexed past the end of the
+        // buffer whenever the read position had wrapped.
+        memcpy(&d[i], &data[(next + i) % alloc], unit_sz);
+}
+
+// Append one unit_sz-sized element from `src`, growing (doubling) the
+// backing storage when full.
+void
+circ_buf::push(void *src)
+{
+    size_t i;
+    void *tmp;
+
+    I(dom, src);
+    I(dom, unread <= alloc);
+
+    /* Grow if necessary. */
+    if (unread == alloc) {
+        I(dom, alloc <= MAX_CIRC_BUF_SIZE);
+        tmp = dom->malloc(alloc << 1);
+        transfer(tmp);
+        alloc <<= 1;
+        dom->free(data);
+        data = (uint8_t *)tmp;
+        // Fix: transfer() compacts the live data to the front of the new
+        // buffer, so the read index must be reset or subsequent reads and
+        // writes address the wrong slots.
+        next = 0;
+    }
+
+    dom->log(rust_log::MEM|rust_log::COMM,
+             "circ buf push, unread=%d, alloc=%d, unit_sz=%d",
+             unread, alloc, unit_sz);
+
+    I(dom, unread < alloc);
+    I(dom, unread + unit_sz <= alloc);
+
+    i = (next + unread) % alloc;
+    memcpy(&data[i], src, unit_sz);
+
+    dom->log(rust_log::MEM|rust_log::COMM, "pushed data at index %d", i);
+    unread += unit_sz;
+}
+
+// Remove the oldest unit_sz-sized element into `dst`, shrinking (halving)
+// the backing storage when occupancy drops to a quarter.
+void
+circ_buf::shift(void *dst)
+{
+    size_t i;
+    void *tmp;
+
+    I(dom, dst);
+    I(dom, unit_sz > 0);
+    I(dom, unread >= unit_sz);
+    I(dom, unread <= alloc);
+    I(dom, data);
+    i = next;
+    memcpy(dst, &data[i], unit_sz);
+    dom->log(rust_log::MEM|rust_log::COMM, "shifted data from index %d", i);
+    unread -= unit_sz;
+    next += unit_sz;
+    I(dom, next <= alloc);
+    if (next == alloc)
+        next = 0;
+
+    /* Shrink if necessary. */
+    if (alloc >= INIT_CIRC_BUF_UNITS * unit_sz &&
+        unread <= alloc / 4) {
+        tmp = dom->malloc(alloc / 2);
+        transfer(tmp);
+        alloc >>= 1;
+        dom->free(data);
+        data = (uint8_t *)tmp;
+        // Fix: transfer() compacted the remaining data to the front of
+        // the new buffer; reset the read index accordingly.
+        next = 0;
+    }
+}
+
+
+// Ports.
+
+// A port is the receiving endpoint; it tracks the channels feeding it and
+// the tokens of writers with data pending.
+rust_port::rust_port(rust_task *task, size_t unit_sz) :
+    task(task),
+    unit_sz(unit_sz),
+    writers(task->dom),
+    chans(task->dom)
+{
+    rust_dom *dom = task->dom;
+    dom->log(rust_log::MEM|rust_log::COMM,
+             "new rust_port(task=0x%" PRIxPTR ", unit_sz=%d) -> port=0x%"
+             PRIxPTR, (uintptr_t)task, unit_sz, (uintptr_t)this);
+}
+
+// Detach every remaining channel before the port disappears so channels
+// do not dangle on a dead port.
+rust_port::~rust_port()
+{
+    rust_dom *dom = task->dom;
+    dom->log(rust_log::COMM|rust_log::MEM,
+             "~rust_port 0x%" PRIxPTR,
+             (uintptr_t)this);
+    while (chans.length() > 0)
+        chans.pop()->disassociate();
+}
+
+
+// Tokens.
+
+rust_token::rust_token(rust_chan *chan) :
+    chan(chan),
+    idx(0),
+    submitted(false)
+{
+}
+
+rust_token::~rust_token()
+{
+}
+
+// True while this token sits in its port's writers vector.
+bool
+rust_token::pending() const
+{
+    return submitted;
+}
+
+// Place this token in the port's writers vector to announce pending data.
+// Must not already be submitted.
+void
+rust_token::submit()
+{
+    rust_port *port = chan->port;
+    rust_dom *dom = chan->task->dom;
+
+    I(dom, port);
+    I(dom, !submitted);
+
+    port->writers.push(this);
+    submitted = true;
+}
+
+// Remove this token from the port's writers vector, waking the sending
+// task first if it is blocked on this token.
+void
+rust_token::withdraw()
+{
+    rust_task *task = chan->task;
+    rust_port *port = chan->port;
+    rust_dom *dom = task->dom;
+
+    I(dom, port);
+    I(dom, submitted);
+
+    if (task->blocked())
+        task->wakeup(this); // must be blocked on us (or dead)
+    port->writers.swapdel(this);
+    submitted = false;
+}
+
+
+//
+// Local Variables:
+// mode: C++
+// fill-column: 78;
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// compile-command: "make -k -C .. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
+// End:
+//
diff --git a/src/rt/rust_crate.cpp b/src/rt/rust_crate.cpp
new file mode 100644
index 00000000..d609ac64
--- /dev/null
+++ b/src/rt/rust_crate.cpp
@@ -0,0 +1,63 @@
+
+#include "rust_internal.h"
+
+// All crate entry points and sections are stored in the crate header as
+// offsets relative to the header itself; these accessors resolve them
+// against `this`.
+uintptr_t
+rust_crate::get_image_base() const {
+  return ((uintptr_t)this + image_base_off);
+}
+
+// Difference between where the crate was loaded and where it was linked
+// to load (self_addr); nonzero when the image was relocated.
+ptrdiff_t
+rust_crate::get_relocation_diff() const {
+  return ((uintptr_t)this - self_addr);
+}
+
+activate_glue_ty
+rust_crate::get_activate_glue() const {
+  return (activate_glue_ty) ((uintptr_t)this + activate_glue_off);
+}
+
+uintptr_t
+rust_crate::get_exit_task_glue() const {
+  return ((uintptr_t)this + exit_task_glue_off);
+}
+
+uintptr_t
+rust_crate::get_unwind_glue() const {
+  return ((uintptr_t)this + unwind_glue_off);
+}
+
+uintptr_t
+rust_crate::get_yield_glue() const {
+  return ((uintptr_t)this + yield_glue_off);
+}
+
+// A [base, lim) window over a section of the crate image.
+rust_crate::mem_area::mem_area(rust_dom *dom, uintptr_t pos, size_t sz)
+  : dom(dom),
+    base(pos),
+    lim(pos + sz)
+{
+  dom->log(rust_log::MEM, "new mem_area [0x%" PRIxPTR ",0x%" PRIxPTR "]",
+           base, lim);
+}
+
+// DWARF .debug_info section of this crate.
+rust_crate::mem_area
+rust_crate::get_debug_info(rust_dom *dom) const {
+  return mem_area(dom, ((uintptr_t)this + debug_info_off),
+                  debug_info_sz);
+}
+
+// DWARF .debug_abbrev section of this crate.
+rust_crate::mem_area
+rust_crate::get_debug_abbrev(rust_dom *dom) const {
+  return mem_area(dom, ((uintptr_t)this + debug_abbrev_off),
+                  debug_abbrev_sz);
+}
+
+//
+// Local Variables:
+// mode: C++
+// fill-column: 78;
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// compile-command: "make -k -C .. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
+// End:
diff --git a/src/rt/rust_crate_cache.cpp b/src/rt/rust_crate_cache.cpp
new file mode 100644
index 00000000..fa10b91b
--- /dev/null
+++ b/src/rt/rust_crate_cache.cpp
@@ -0,0 +1,306 @@
+
+#include "rust_internal.h"
+
+// Loads a dynamic library by name.  `handle` is 0 on failure; callers
+// check via get_handle().
+// NOTE(review): _T(name) applies the TCHAR literal macro to a variable;
+// under a UNICODE build this would not compile as intended — confirm
+// Windows builds are ANSI here.
+rust_crate_cache::lib::lib(rust_dom *dom, char const *name)
+  : handle(0),
+    dom(dom)
+{
+#if defined(__WIN32__)
+  handle = (uintptr_t)LoadLibrary(_T(name));
+#else
+  handle = (uintptr_t)dlopen(name, RTLD_LOCAL|RTLD_LAZY);
+#endif
+  dom->log(rust_log::CACHE, "loaded library '%s' as 0x%" PRIxPTR,
+           name, handle);
+}
+
+// Unloads the library, if it was successfully loaded.
+rust_crate_cache::lib::~lib() {
+  dom->log(rust_log::CACHE, "~rust_crate_cache::lib(0x%" PRIxPTR ")",
+           handle);
+  if (handle) {
+#if defined(__WIN32__)
+    FreeLibrary((HMODULE)handle);
+#else
+    dlclose((void*)handle);
+#endif
+  }
+}
+
+uintptr_t
+rust_crate_cache::lib::get_handle() {
+  return handle;
+}
+
+
+
+// Resolves a C symbol `name` in `library`; holds a reference on the
+// library for its lifetime.  `val` is 0 when resolution fails.
+rust_crate_cache::c_sym::c_sym(rust_dom *dom, lib *library, char const *name)
+  : val(0),
+    library(library),
+    dom(dom)
+{
+  library->ref();
+  uintptr_t handle = library->get_handle();
+  if (handle) {
+#if defined(__WIN32__)
+    val = (uintptr_t)GetProcAddress((HMODULE)handle, _T(name));
+#else
+    val = (uintptr_t)dlsym((void*)handle, name);
+#endif
+    dom->log(rust_log::CACHE, "resolved symbol '%s' to 0x%" PRIxPTR,
+             name, val);
+  } else {
+    dom->log(rust_log::CACHE, "unresolved symbol '%s', null lib handle",
+             name);
+  }
+}
+
+rust_crate_cache::c_sym::~c_sym() {
+  dom->log(rust_log::CACHE,
+           "~rust_crate_cache::c_sym(0x%" PRIxPTR ")", val);
+  library->deref();
+}
+
+uintptr_t
+rust_crate_cache::c_sym::get_val() {
+  return val;
+}
+
+
+
+// Resolves a Rust symbol by walking the target crate's DWARF DIE tree
+// along `path` (a crate-relative name sequence), reading DW_AT_low_pc of
+// the leaf and rebasing it on the crate's image base.  `val` stays 0 on
+// failure.  Holds a reference on the crate symbol for its lifetime.
+rust_crate_cache::rust_sym::rust_sym(rust_dom *dom,
+                                     rust_crate const *curr_crate,
+                                     c_sym *crate_sym,
+                                     char const **path)
+  : val(0),
+    crate_sym(crate_sym),
+    dom(dom)
+{
+  crate_sym->ref();
+  typedef rust_crate_reader::die die;
+  rust_crate const *crate = (rust_crate*)crate_sym->get_val();
+  if (!crate) {
+    dom->log(rust_log::CACHE,
+             "failed to resolve symbol, null crate symbol");
+    return;
+  }
+  rust_crate_reader rdr(dom, crate);
+  bool found_root = false;
+  bool found_leaf = false;
+  // Outer loop: scan top-level DIEs until one matches the first path
+  // component (found_root) or the tree is exhausted.
+  for (die d = rdr.dies.first_die();
+       !(found_root || d.is_null());
+       d = d.next_sibling()) {
+
+    die t1 = d;
+    die t2 = d;
+    // Inner loop: descend one path component at a time.
+    for (char const **c = crate_rel(curr_crate, path);
+         (*c
+          && !t1.is_null()
+          && t1.find_child_by_name(crate_rel(curr_crate, *c), t2));
+         ++c, t1=t2) {
+      dom->log(rust_log::DWARF|rust_log::CACHE,
+               "matched die <0x%" PRIxPTR
+               ">, child '%s' = die<0x%" PRIxPTR ">",
+               t1.off, crate_rel(curr_crate, *c), t2.off);
+      // NOTE(review): equivalent to `found_root = true`.
+      found_root = found_root || true;
+      if (!*(c+1) && t2.find_num_attr(DW_AT_low_pc, val)) {
+        dom->log(rust_log::DWARF|rust_log::CACHE,
+                 "found relative address: 0x%" PRIxPTR, val);
+        dom->log(rust_log::DWARF|rust_log::CACHE,
+                 "plus image-base 0x%" PRIxPTR,
+                 crate->get_image_base());
+        val += crate->get_image_base();
+        found_leaf = true;
+        break;
+      }
+    }
+    if (found_root || found_leaf)
+      break;
+  }
+  if (found_leaf) {
+    dom->log(rust_log::CACHE, "resolved symbol to 0x%" PRIxPTR, val);
+  } else {
+    dom->log(rust_log::CACHE, "failed to resolve symbol");
+  }
+}
+
+rust_crate_cache::rust_sym::~rust_sym() {
+  dom->log(rust_log::CACHE,
+           "~rust_crate_cache::rust_sym(0x%" PRIxPTR ")", val);
+  crate_sym->deref();
+}
+
+uintptr_t
+rust_crate_cache::rust_sym::get_val() {
+  return val;
+}
+
+
+
+// Returns the cached library for slot `n`, loading it on first use.
+rust_crate_cache::lib *
+rust_crate_cache::get_lib(size_t n, char const *name)
+{
+  I(dom, n < crate->n_libs);
+  lib *library = libs[n];
+  if (!library) {
+    library = new (dom) lib(dom, name);
+    libs[n] = library;
+  }
+  return library;
+}
+
+// Returns the cached C symbol for slot `n`, resolving it on first use.
+// NOTE(review): the log line below fires before the miss check, so it
+// reports "cached" (with a NULL value) even on a cache miss.
+rust_crate_cache::c_sym *
+rust_crate_cache::get_c_sym(size_t n, lib *library, char const *name)
+{
+  I(dom, n < crate->n_c_syms);
+  c_sym *sym = c_syms[n];
+  dom->log(rust_log::CACHE, "cached C symbol %s = 0x%" PRIxPTR, name, sym);
+  if (!sym) {
+    sym = new (dom) c_sym(dom, library, name);
+    c_syms[n] = sym;
+  }
+  return sym;
+}
+
+// Returns the cached Rust symbol for slot `n`, resolving it on first use.
+rust_crate_cache::rust_sym *
+rust_crate_cache::get_rust_sym(size_t n,
+                               rust_dom *dom,
+                               rust_crate const *curr_crate,
+                               c_sym *crate_sym,
+                               char const **path)
+{
+  I(dom, n < crate->n_rust_syms);
+  rust_sym *sym = rust_syms[n];
+  if (!sym) {
+    sym = new (dom) rust_sym(dom, curr_crate, crate_sym, path);
+    rust_syms[n] = sym;
+  }
+  return sym;
+}
+
+// Rebase a relative displacement from object `oldp` onto object `newp`.
+// A displacement of zero means "absent" and is left untouched.
+static inline void
+adjust_disp(uintptr_t &disp, const void *oldp, const void *newp)
+{
+    if (disp == 0)
+        return;
+    disp = disp + (uintptr_t)oldp - (uintptr_t)newp;
+}
+
+// Returns (building and memoizing on miss) an instantiated type
+// descriptor keyed by the tuple of input descriptors.  The new descriptor
+// is a copy of descs[0] with the caller-computed size/align, the param
+// descriptors appended, and the glue offsets rebased to the new address.
+type_desc *
+rust_crate_cache::get_type_desc(size_t size,
+                                size_t align,
+                                size_t n_descs,
+                                type_desc const **descs)
+{
+  I(dom, n_descs > 1);
+  type_desc *td = NULL;
+  size_t keysz = n_descs * sizeof(type_desc*);
+  HASH_FIND(hh, this->type_descs, descs, keysz, td);
+  if (td) {
+    dom->log(rust_log::CACHE, "rust_crate_cache::get_type_desc hit");
+    return td;
+  }
+  dom->log(rust_log::CACHE, "rust_crate_cache::get_type_desc miss");
+  td = (type_desc*) dom->malloc(sizeof(type_desc) + keysz);
+  if (!td)
+    return NULL;
+  // By convention, desc 0 is the root descriptor.
+  // but we ignore the size and alignment of it and use the
+  // passed-in, computed values.
+  memcpy(td, descs[0], sizeof(type_desc));
+  td->first_param = &td->descs[1];
+  td->size = size;
+  td->align = align;
+  for (size_t i = 0; i < n_descs; ++i) {
+    dom->log(rust_log::CACHE,
+             "rust_crate_cache::descs[%" PRIdPTR "] = 0x%" PRIxPTR,
+             i, descs[i]);
+    td->descs[i] = descs[i];
+  }
+  // Glue offsets were relative to descs[0]; make them relative to td.
+  adjust_disp(td->copy_glue_off, descs[0], td);
+  adjust_disp(td->drop_glue_off, descs[0], td);
+  adjust_disp(td->free_glue_off, descs[0], td);
+  adjust_disp(td->mark_glue_off, descs[0], td);
+  adjust_disp(td->obj_drop_glue_off, descs[0], td);
+  HASH_ADD(hh, this->type_descs, descs, keysz, td);
+  return td;
+}
+
+// Allocates zeroed (hence all-NULL) slot arrays sized by the crate's
+// symbol/library counts.
+rust_crate_cache::rust_crate_cache(rust_dom *dom,
+                                   rust_crate const *crate)
+  : rust_syms((rust_sym**)
+              dom->calloc(sizeof(rust_sym*) * crate->n_rust_syms)),
+    c_syms((c_sym**) dom->calloc(sizeof(c_sym*) * crate->n_c_syms)),
+    libs((lib**) dom->calloc(sizeof(lib*) * crate->n_libs)),
+    type_descs(NULL),
+    crate(crate),
+    dom(dom),
+    idx(0)
+{
+  I(dom, rust_syms);
+  I(dom, c_syms);
+  I(dom, libs);
+}
+
+// Drops every cached entry: derefs all symbols and libraries (NULLing
+// the slots) and frees all memoized type descriptors.
+void
+rust_crate_cache::flush() {
+  dom->log(rust_log::CACHE, "rust_crate_cache::flush()");
+  for (size_t i = 0; i < crate->n_rust_syms; ++i) {
+    rust_sym *s = rust_syms[i];
+    if (s) {
+      dom->log(rust_log::CACHE,
+               "rust_crate_cache::flush() deref rust_sym %"
+               PRIdPTR " (rc=%" PRIdPTR ")", i, s->refcnt);
+      s->deref();
+    }
+    rust_syms[i] = NULL;
+  }
+
+  for (size_t i = 0; i < crate->n_c_syms; ++i) {
+    c_sym *s = c_syms[i];
+    if (s) {
+      dom->log(rust_log::CACHE,
+               "rust_crate_cache::flush() deref c_sym %"
+               PRIdPTR " (rc=%" PRIdPTR ")", i, s->refcnt);
+      s->deref();
+    }
+    c_syms[i] = NULL;
+  }
+
+  for (size_t i = 0; i < crate->n_libs; ++i) {
+    lib *l = libs[i];
+    if (l) {
+      dom->log(rust_log::CACHE, "rust_crate_cache::flush() deref lib %"
+               PRIdPTR " (rc=%" PRIdPTR ")", i, l->refcnt);
+      l->deref();
+    }
+    libs[i] = NULL;
+  }
+
+  while (type_descs) {
+    type_desc *d = type_descs;
+    HASH_DEL(type_descs, d);
+    dom->log(rust_log::MEM,
+             "rust_crate_cache::flush() tydesc %" PRIxPTR, d);
+    dom->free(d);
+  }
+}
+
+// Flushes all entries, then frees the slot arrays themselves.
+rust_crate_cache::~rust_crate_cache()
+{
+  flush();
+  dom->free(rust_syms);
+  dom->free(c_syms);
+  dom->free(libs);
+}
+
+//
+// Local Variables:
+// mode: C++
+// fill-column: 78;
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// compile-command: "make -k -C .. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
+// End:
+//
diff --git a/src/rt/rust_crate_reader.cpp b/src/rt/rust_crate_reader.cpp
new file mode 100644
index 00000000..3c36729f
--- /dev/null
+++ b/src/rt/rust_crate_reader.cpp
@@ -0,0 +1,578 @@
+
+#include "rust_internal.h"
+
+// mem_reader: a bounds-checked cursor over a mem_area.  `ok` latches
+// false on any out-of-range access and stays false until reset().
+bool
+rust_crate_reader::mem_reader::is_ok()
+{
+  return ok;
+}
+
+bool
+rust_crate_reader::mem_reader::at_end()
+{
+  return pos == mem.lim;
+}
+
+// Latch the reader into the failed state.
+void
+rust_crate_reader::mem_reader::fail()
+{
+  ok = false;
+}
+
+// Rewind to the start of the area and clear any failure.
+void
+rust_crate_reader::mem_reader::reset()
+{
+  pos = mem.base;
+  ok = true;
+}
+
+rust_crate_reader::mem_reader::mem_reader(rust_crate::mem_area &m)
+  : mem(m),
+    ok(true),
+    pos(m.base)
+{}
+
+// Absolute position of the cursor.
+size_t
+rust_crate_reader::mem_reader::tell_abs()
+{
+  return pos;
+}
+
+// Position of the cursor relative to the start of the area.
+size_t
+rust_crate_reader::mem_reader::tell_off()
+{
+  return pos - mem.base;
+}
+
+// Seek to an absolute address; fails if outside [base, lim).
+void
+rust_crate_reader::mem_reader::seek_abs(uintptr_t p)
+{
+  if (!ok || p < mem.base || p >= mem.lim)
+    ok = false;
+  else
+    pos = p;
+}
+
+void
+rust_crate_reader::mem_reader::seek_off(uintptr_t p)
+{
+  seek_abs(p + mem.base);
+}
+
+
+// Advance past a NUL-terminated string, counting its bytes into `sz`.
+// NOTE(review): `sz` is passed by value, so the count computed here never
+// reaches the caller — get_zstr's out-length is effectively dead.  The
+// parameter should presumably be `size_t &sz`; confirm against the header.
+bool
+rust_crate_reader::mem_reader::adv_zstr(size_t sz)
+{
+  sz = 0;
+  while (ok) {
+    char c;
+    get(c);
+    ++sz;
+    if (c == '\0')
+      return true;
+  }
+  return false;
+}
+
+// Record a pointer to the NUL-terminated string at the cursor and skip
+// past it.  (See the NOTE above: `sz` is never actually written.)
+bool
+rust_crate_reader::mem_reader::get_zstr(char const *&c, size_t &sz)
+{
+  if (!ok)
+    return false;
+  c = (char const *)(pos);
+  return adv_zstr(sz);
+}
+
+// Skip `amt` bytes, failing on any overrun of the area.
+void
+rust_crate_reader::mem_reader::adv(size_t amt)
+{
+  if (pos < mem.base
+      || pos >= mem.lim
+      || pos + amt > mem.lim)
+    ok = false;
+  if (!ok)
+    return;
+  // mem.dom->log(rust_log::MEM, "adv %d bytes", amt);
+  pos += amt;
+  ok &= !at_end();
+  I(mem.dom, at_end() || (mem.base <= pos && pos < mem.lim));
+}
+
+
+// One entry of the .debug_abbrev table: the tag, child flag, and the
+// offset of its attr/form pair list within the abbrev section.
+// NOTE(review): the body_sz parameter is accepted but not stored —
+// confirm whether the field was dropped intentionally.
+rust_crate_reader::abbrev::abbrev(rust_dom *dom,
+                                  uintptr_t body_off,
+                                  size_t body_sz,
+                                  uintptr_t tag,
+                                  uint8_t has_children) :
+  dom(dom),
+  body_off(body_off),
+  tag(tag),
+  has_children(has_children),
+  idx(0)
+{}
+
+
+// Parses the whole .debug_abbrev section up front: each record is a
+// ULEB128 index, a ULEB128 tag, a child flag, and a (0,0)-terminated list
+// of attr/form pairs.  Records are pushed in order, so record i lives at
+// abbrevs[i-1] (DWARF abbrev codes are 1-based).
+rust_crate_reader::abbrev_reader::abbrev_reader
+  (rust_crate::mem_area &abbrev_mem)
+  : mem_reader(abbrev_mem),
+    abbrevs(abbrev_mem.dom)
+{
+  rust_dom *dom = mem.dom;
+  while (is_ok()) {
+
+    // dom->log(rust_log::DWARF, "reading new abbrev at 0x%" PRIxPTR,
+    //          tell_off());
+
+    uintptr_t idx, tag;
+    uint8_t has_children;
+    get_uleb(idx);
+    get_uleb(tag);
+    get(has_children);
+
+    uintptr_t attr, form;
+    size_t body_off = tell_off();
+    while (is_ok() && step_attr_form_pair(attr, form));
+
+    // dom->log(rust_log::DWARF,
+    //         "finished scanning attr/form pairs, pos=0x%"
+    //          PRIxPTR ", lim=0x%" PRIxPTR ", is_ok=%d, at_end=%d",
+    //          pos, mem.lim, is_ok(), at_end());
+
+    if (is_ok() || at_end()) {
+      dom->log(rust_log::DWARF, "read abbrev: %" PRIdPTR, idx);
+      // Fix: this assertion used `=` (assignment), which clobbered idx
+      // and always passed; it is meant to check that abbrev codes arrive
+      // densely in 1-based order.
+      I(dom, idx == abbrevs.length() + 1);
+      abbrevs.push(new (dom) abbrev(dom, body_off,
+                                    tell_off() - body_off,
+                                    tag, has_children));
+    }
+  }
+}
+
+// Look up an abbrev by its 1-based DWARF code; NULL if out of range.
+rust_crate_reader::abbrev *
+rust_crate_reader::abbrev_reader::get_abbrev(size_t i) {
+  i -= 1;
+  if (i < abbrevs.length())
+    return abbrevs[i];
+  return NULL;
+}
+
+// Read one attr/form ULEB128 pair; a (0,0) pair terminates the list and
+// makes this return false.
+bool
+rust_crate_reader::abbrev_reader::step_attr_form_pair(uintptr_t &attr,
+                                                      uintptr_t &form)
+{
+  attr = 0;
+  form = 0;
+  // mem.dom->log(rust_log::DWARF, "reading attr/form pair at 0x%" PRIxPTR,
+  //              tell_off());
+  get_uleb(attr);
+  get_uleb(form);
+  // mem.dom->log(rust_log::DWARF, "attr 0x%" PRIxPTR ", form 0x%" PRIxPTR,
+  //              attr, form);
+  return ! (attr == 0 && form == 0);
+}
+// Frees the abbrev records allocated by the constructor.
+rust_crate_reader::abbrev_reader::~abbrev_reader() {
+  while (abbrevs.length()) {
+    delete abbrevs.pop();
+  }
+}
+
+
+// The forms this reader understands as plain numbers.
+bool
+rust_crate_reader::attr::is_numeric() const
+{
+  switch (form) {
+  case DW_FORM_ref_addr:
+  case DW_FORM_addr:
+  case DW_FORM_data4:
+  case DW_FORM_data1:
+  case DW_FORM_flag:
+    return true;
+  default:
+    break;
+  }
+  return false;
+}
+
+bool
+rust_crate_reader::attr::is_string() const
+{
+  return form == DW_FORM_string;
+}
+
+// Size of the in-place string value; asserts the form is a string.
+size_t
+rust_crate_reader::attr::get_ssz(rust_dom *dom) const
+{
+  I(dom, is_string());
+  return val.str.sz;
+}
+
+char const *
+rust_crate_reader::attr::get_str(rust_dom *dom) const
+{
+  I(dom, is_string());
+  return val.str.s;
+}
+
+uintptr_t
+rust_crate_reader::attr::get_num(rust_dom *dom) const
+{
+  I(dom, is_numeric());
+  return val.num;
+}
+
+bool
+rust_crate_reader::attr::is_unknown() const {
+  return !(is_numeric() || is_string());
+}
+
+// RAII guard asserting exclusive use of a die_reader for the duration of
+// a scope; die_readers are stateful cursors and must not be shared.
+rust_crate_reader::rdr_sess::rdr_sess(die_reader *rdr) : rdr(rdr)
+{
+  I(rdr->mem.dom, !rdr->in_use);
+  rdr->in_use = true;
+}
+
+rust_crate_reader::rdr_sess::~rdr_sess()
+{
+  rdr->in_use = false;
+}
+
+// A DIE is identified by its offset in .debug_info; construction decodes
+// its abbrev code.  A code of 0 is a null DIE (ab == NULL), which DWARF
+// uses to terminate sibling chains.
+rust_crate_reader::die::die(die_reader *rdr, uintptr_t off)
+  : rdr(rdr),
+    off(off),
+    using_rdr(false)
+{
+  rust_dom *dom = rdr->mem.dom;
+  rdr_sess use(rdr);
+
+  rdr->reset();
+  rdr->seek_off(off);
+  if (!rdr->is_ok()) {
+    ab = NULL;
+    return;
+  }
+  size_t ab_idx;
+  rdr->get_uleb(ab_idx);
+  if (!ab_idx) {
+    ab = NULL;
+    dom->log(rust_log::DWARF, "DIE <0x%" PRIxPTR "> (null)", off);
+  } else {
+    ab = rdr->abbrevs.get_abbrev(ab_idx);
+    dom->log(rust_log::DWARF, "DIE <0x%" PRIxPTR "> abbrev 0x%"
+             PRIxPTR, off, ab_idx);
+    dom->log(rust_log::DWARF, "  tag 0x%x, has children: %d",
+             ab->tag, ab->has_children);
+  }
+}
+
+bool
+rust_crate_reader::die::is_null() const
+{
+  return ab == NULL;
+}
+
+bool
+rust_crate_reader::die::has_children() const
+{
+  return (!is_null()) && ab->has_children;
+}
+
+// Tag of this DIE; -1 for a null DIE.
+dw_tag
+rust_crate_reader::die::tag() const
+{
+  if (is_null())
+    return (dw_tag) (-1);
+  return (dw_tag) ab->tag;
+}
+
+// Position both cursors at the start of this DIE's attribute values
+// (info reader just past the 1-byte abbrev code, abbrev reader at the
+// abbrev's attr/form list).
+bool
+rust_crate_reader::die::start_attrs() const
+{
+  if (is_null())
+    return false;
+  rdr->reset();
+  rdr->seek_off(off + 1);
+  rdr->abbrevs.reset();
+  rdr->abbrevs.seek_off(ab->body_off);
+  return rdr->is_ok();
+}
+
+// Decode the next attribute of this DIE into `a`, consuming its value
+// according to its form.  Only the forms listed below are supported;
+// anything else fails the reader.  Returns false at the end of the
+// attribute list or on error.
+bool
+rust_crate_reader::die::step_attr(attr &a) const
+{
+  uintptr_t ai, fi;
+  if (rdr->abbrevs.step_attr_form_pair(ai, fi) && rdr->is_ok()) {
+    a.at = (dw_at)ai;
+    a.form = (dw_form)fi;
+
+    uint32_t u32;
+    uint8_t u8;
+
+    switch (a.form) {
+    case DW_FORM_string:
+      return rdr->get_zstr(a.val.str.s, a.val.str.sz);
+      break;
+
+    case DW_FORM_ref_addr:
+      // ref_addr is address-sized; this reader only handles 32-bit.
+      I(rdr->mem.dom, sizeof(uintptr_t) == 4);
+    case DW_FORM_addr:
+    case DW_FORM_data4:
+      rdr->get(u32);
+      a.val.num = (uintptr_t)u32;
+      return rdr->is_ok() || rdr->at_end();
+      break;
+
+    case DW_FORM_data1:
+    case DW_FORM_flag:
+      rdr->get(u8);
+      a.val.num = u8;
+      return rdr->is_ok() || rdr->at_end();
+      break;
+
+    case DW_FORM_block1:
+      // Length-prefixed block: skip the payload, value is discarded.
+      rdr->get(u8);
+      rdr->adv(u8);
+      return rdr->is_ok() || rdr->at_end();
+      break;
+
+    default:
+      rdr->mem.dom->log(rust_log::DWARF, "  unknown dwarf form: 0x%"
+                        PRIxPTR, a.form);
+      rdr->fail();
+      break;
+    }
+  }
+  return false;
+}
+
+// Scan this DIE's attributes for a string-valued attribute `at`.
+bool
+rust_crate_reader::die::find_str_attr(dw_at at, char const *&c)
+{
+  rdr_sess use(rdr);
+  if (is_null())
+    return false;
+  if (start_attrs()) {
+    attr a;
+    while (step_attr(a)) {
+      if (a.at == at && a.is_string()) {
+        c = a.get_str(rdr->mem.dom);
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+// Scan this DIE's attributes for a numeric-valued attribute `at`.
+bool
+rust_crate_reader::die::find_num_attr(dw_at at, uintptr_t &n)
+{
+  rdr_sess use(rdr);
+  if (is_null())
+    return false;
+  if (start_attrs()) {
+    attr a;
+    while (step_attr(a)) {
+      if (a.at == at && a.is_numeric()) {
+        n = a.get_num(rdr->mem.dom);
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+bool
+rust_crate_reader::die::is_transparent()
+{
+  // "semantically transparent" DIEs are those with
+  // children that serve to structure the tree but have
+  // tags that don't reflect anything in the rust-module
+  // name hierarchy.
+  switch (tag()) {
+  case DW_TAG_compile_unit:
+  case DW_TAG_lexical_block:
+    return (has_children());
+  default:
+    break;
+  }
+  return false;
+}
+
+// Search this DIE's children for one whose DW_AT_name equals `c`.  Unless
+// `exact`, descends through "transparent" structural DIEs (compile units,
+// lexical blocks) as if their children were direct children.
+bool
+rust_crate_reader::die::find_child_by_name(char const *c,
+                                           die &child,
+                                           bool exact)
+{
+  rust_dom *dom = rdr->mem.dom;
+  I(dom, has_children());
+  I(dom, !is_null());
+
+  for (die ch = next(); !ch.is_null(); ch = ch.next_sibling()) {
+    char const *ac;
+    if (!exact && ch.is_transparent()) {
+      if (ch.find_child_by_name(c, child, exact)) {
+        return true;
+      }
+    }
+    else if (ch.find_str_attr(DW_AT_name, ac)) {
+      if (strcmp(ac, c) == 0) {
+        child = ch;
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+// Search this DIE's direct children for the first with the given tag.
+bool
+rust_crate_reader::die::find_child_by_tag(dw_tag tag, die &child)
+{
+  rust_dom *dom = rdr->mem.dom;
+  I(dom, has_children());
+  I(dom, !is_null());
+
+  for (child = next(); !child.is_null();
+       child = child.next_sibling()) {
+    if (child.tag() == tag)
+      return true;
+  }
+  return false;
+}
+
+// The DIE physically following this one in .debug_info: for a null DIE
+// that is the next byte; otherwise the cursor is walked over all of this
++// DIE's attribute values (logging them along the way) to find the end.
+rust_crate_reader::die
+rust_crate_reader::die::next() const
+{
+  rust_dom *dom = rdr->mem.dom;
+
+  if (is_null()) {
+    rdr->seek_off(off + 1);
+    return die(rdr, rdr->tell_off());
+  }
+
+  {
+    rdr_sess use(rdr);
+    if (start_attrs()) {
+      attr a;
+      while (step_attr(a)) {
+        I(dom, !(a.is_numeric() && a.is_string()));
+        if (a.is_numeric())
+          dom->log(rust_log::DWARF, "  attr num: 0x%"
+                   PRIxPTR, a.get_num(dom));
+        else if (a.is_string())
+          dom->log(rust_log::DWARF, "  attr str: %s",
+                   a.get_str(dom));
+        else
+          dom->log(rust_log::DWARF, "  attr ??:");
+      }
+    }
+  }
+  return die(rdr, rdr->tell_off());
+}
+
+// The next sibling: skip over this DIE's entire subtree by walking its
+// children to their terminating null DIE.
+rust_crate_reader::die
+rust_crate_reader::die::next_sibling() const
+{
+  // FIXME: use DW_AT_sibling, when present.
+  if (has_children()) {
+    // rdr->mem.dom->log(rust_log::DWARF, "+++ children of die 0x%"
+    //                   PRIxPTR, off);
+    die child = next();
+    while (!child.is_null())
+      child = child.next_sibling();
+    // rdr->mem.dom->log(rust_log::DWARF, "--- children of die 0x%"
+    //                   PRIxPTR, off);
+    return child.next();
+  } else {
+    return next();
+  }
+}
+
+
+// First DIE of the compilation unit: just past the CU header fields.
+rust_crate_reader::die
+rust_crate_reader::die_reader::first_die()
+{
+  reset();
+  seek_off(cu_base
+           + sizeof(dwarf_vers)
+           + sizeof(cu_abbrev_off)
+           + sizeof(sizeof_addr));
+  return die(this, tell_off());
+}
+
+// Diagnostic full walk of the DIE tree; asserts it terminates exactly at
+// the end of the section.
+void
+rust_crate_reader::die_reader::dump()
+{
+  rust_dom *dom = mem.dom;
+  die d = first_die();
+  while (!d.is_null())
+    d = d.next_sibling();
+  I(dom, d.is_null());
+  I(dom, d.off == mem.lim - mem.base);
+}
+
+
+// Reads and sanity-checks the compilation-unit header at the start of
+// .debug_info (unit length, DWARF version, abbrev offset, address size).
+rust_crate_reader::die_reader::die_reader(rust_crate::mem_area &die_mem,
+                                          abbrev_reader &abbrevs)
+  : mem_reader(die_mem),
+    abbrevs(abbrevs),
+    cu_unit_length(0),
+    cu_base(0),
+    dwarf_vers(0),
+    cu_abbrev_off(0),
+    sizeof_addr(0),
+    in_use(false)
+{
+  rust_dom *dom = mem.dom;
+
+  rdr_sess use(this);
+
+  get(cu_unit_length);
+  cu_base = tell_off();
+
+  get(dwarf_vers);
+  get(cu_abbrev_off);
+  get(sizeof_addr);
+
+  if (is_ok()) {
+    dom->log(rust_log::DWARF, "new root CU at 0x%" PRIxPTR, die_mem.base);
+    dom->log(rust_log::DWARF, "CU unit length: %" PRId32, cu_unit_length);
+    dom->log(rust_log::DWARF, "dwarf version: %" PRId16, dwarf_vers);
+    dom->log(rust_log::DWARF, "CU abbrev off: %" PRId32, cu_abbrev_off);
+    dom->log(rust_log::DWARF, "size of address: %" PRId8, sizeof_addr);
+    I(dom, sizeof_addr == sizeof(uintptr_t));
+    I(dom, dwarf_vers >= 2);
+    I(dom, cu_base + cu_unit_length == die_mem.lim - die_mem.base);
+  } else {
+    dom->log(rust_log::DWARF, "failed to read root CU header");
+  }
+}
+
+rust_crate_reader::die_reader::~die_reader() {
+}
+
+
+// Ties together the crate's two DWARF sections and runs a diagnostic
+// dump of the DIE tree on construction.
+rust_crate_reader::rust_crate_reader(rust_dom *dom,
+                                     rust_crate const *crate)
+  : dom(dom),
+    crate(crate),
+    abbrev_mem(crate->get_debug_abbrev(dom)),
+    abbrevs(abbrev_mem),
+    die_mem(crate->get_debug_info(dom)),
+    dies(die_mem, abbrevs)
+{
+  dom->log(rust_log::MEM, "crate_reader on crate: 0x%" PRIxPTR, this);
+  dom->log(rust_log::MEM, "debug_abbrev: 0x%" PRIxPTR, abbrev_mem.base);
+  dom->log(rust_log::MEM, "debug_info: 0x%" PRIxPTR, die_mem.base);
+  // For now, perform diagnostics only.
+  dies.dump();
+}
+
+
+//
+// Local Variables:
+// mode: C++
+// fill-column: 78;
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// compile-command: "make -k -C .. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
+// End:
diff --git a/src/rt/rust_dom.cpp b/src/rt/rust_dom.cpp
new file mode 100644
index 00000000..3b5e23b2
--- /dev/null
+++ b/src/rt/rust_dom.cpp
@@ -0,0 +1,271 @@
+
+#include <stdarg.h>
+#include "rust_internal.h"
+
+template class ptr_vec<rust_task>;
+
+// A domain owns a set of tasks (running/blocked/dead vectors), per-crate
+// caches, and an ISAAC RNG seeded from the OS entropy source.
+rust_dom::rust_dom(rust_srv *srv, rust_crate const *root_crate) :
+    interrupt_flag(0),
+    root_crate(root_crate),
+    _log(srv, this),
+    srv(srv),
+    running_tasks(this),
+    blocked_tasks(this),
+    dead_tasks(this),
+    caches(this),
+    root_task(NULL),
+    curr_task(NULL),
+    rval(0)
+{
+    logptr("new dom", (uintptr_t)this);
+    memset(&rctx, 0, sizeof(rctx));
+
+#ifdef __WIN32__
+    {
+        HCRYPTPROV hProv;
+        win32_require
+            (_T("CryptAcquireContext"),
+             CryptAcquireContext(&hProv, NULL, NULL, PROV_RSA_FULL,
+                                 CRYPT_VERIFYCONTEXT|CRYPT_SILENT));
+        win32_require
+            (_T("CryptGenRandom"),
+             CryptGenRandom(hProv, sizeof(rctx.randrsl),
+                            (BYTE*)(&rctx.randrsl)));
+        win32_require
+            (_T("CryptReleaseContext"),
+             CryptReleaseContext(hProv, 0));
+    }
+#else
+    int fd = open("/dev/urandom", O_RDONLY);
+    // NOTE(review): 0 is a valid descriptor; this should arguably be
+    // fd >= 0.
+    I(this, fd > 0);
+    I(this, read(fd, (void*) &rctx.randrsl, sizeof(rctx.randrsl))
+      == sizeof(rctx.randrsl));
+    I(this, close(fd) == 0);
+    pthread_attr_init(&attr);
+    pthread_attr_setstacksize(&attr, 1024 * 1024);
+    pthread_attr_setdetachstate(&attr, true);
+#endif
+    randinit(&rctx, 1);
+
+    root_task = new (this) rust_task(this, NULL);
+}
+
+// Delete every task in a state vector, popping from the back.
+static void
+del_all_tasks(rust_dom *dom, ptr_vec<rust_task> *v) {
+    I(dom, v);
+    while (v->length()) {
+        dom->log(rust_log::TASK, "deleting task %" PRIdPTR, v->length() - 1);
+        delete v->pop();
+    }
+}
+
+// Tears down all tasks in every state, then the crate caches.
+rust_dom::~rust_dom() {
+    log(rust_log::TASK, "deleting all running tasks");
+    del_all_tasks(this, &running_tasks);
+    log(rust_log::TASK, "deleting all blocked tasks");
+    del_all_tasks(this, &blocked_tasks);
+    log(rust_log::TASK, "deleting all dead tasks");
+    del_all_tasks(this, &dead_tasks);
+#ifndef __WIN32__
+    pthread_attr_destroy(&attr);
+#endif
+    while (caches.length())
+        delete caches.pop();
+}
+
+// Switch into `task` via the crate's activate glue; curr_task is set for
+// the duration of the activation.
+void
+rust_dom::activate(rust_task *task) {
+    curr_task = task;
+    root_crate->get_activate_glue()(task);
+    curr_task = NULL;
+}
+
+// printf-style logging; messages longer than the 256-byte scratch buffer
+// are silently truncated by vsnprintf.
+void
+rust_dom::log(uint32_t type_bits, char const *fmt, ...) {
+    char buf[256];
+    if (_log.is_tracing(type_bits)) {
+        va_list args;
+        va_start(args, fmt);
+        vsnprintf(buf, sizeof(buf), fmt, args);
+        _log.trace_ln(type_bits, buf);
+        va_end(args);
+    }
+}
+
+rust_log &
+rust_dom::get_log() {
+    return _log;
+}
+
+// Convenience: log a labelled pointer value under the MEM category.
+void
+rust_dom::logptr(char const *msg, uintptr_t ptrval) {
+    log(rust_log::MEM, "%s 0x%" PRIxPTR, msg, ptrval);
+}
+
+template<typename T> void
+rust_dom::logptr(char const *msg, T* ptrval) {
+    log(rust_log::MEM, "%s 0x%" PRIxPTR, msg, (uintptr_t)ptrval);
+}
+
+
+// Mark the domain as failed (process exit value 1); may only happen once.
+void
+rust_dom::fail() {
+    log(rust_log::DOM, "domain 0x%" PRIxPTR " root task failed", this);
+    I(this, rval == 0);
+    rval = 1;
+}
+
+// Allocation wrappers around the service provider; all assert success
+// rather than returning NULL.
+void *
+rust_dom::malloc(size_t sz) {
+    void *p = srv->malloc(sz);
+    I(this, p);
+    log(rust_log::MEM, "rust_dom::malloc(%d) -> 0x%" PRIxPTR,
+        sz, p);
+    return p;
+}
+
+void *
+rust_dom::calloc(size_t sz) {
+    void *p = this->malloc(sz);
+    memset(p, 0, sz);
+    return p;
+}
+
+void *
+rust_dom::realloc(void *p, size_t sz) {
+    void *p1 = srv->realloc(p, sz);
+    I(this, p1);
+    log(rust_log::MEM, "rust_dom::realloc(0x%" PRIxPTR ", %d) -> 0x%" PRIxPTR,
+        p, sz, p1);
+    return p1;
+}
+
+// Frees must be of non-NULL pointers (asserted).
+void
+rust_dom::free(void *p) {
+    log(rust_log::MEM, "rust_dom::free(0x%" PRIxPTR ")", p);
+    I(this, p);
+    srv->free(p);
+}
+
+#ifdef __WIN32__
+// Assert that a Win32 call succeeded; on failure, format and log the
+// system error message for GetLastError() before asserting.
+void
+rust_dom::win32_require(LPCTSTR fn, BOOL ok) {
+    if (!ok) {
+        LPTSTR buf;
+        DWORD err = GetLastError();
+        FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+                      FORMAT_MESSAGE_FROM_SYSTEM |
+                      FORMAT_MESSAGE_IGNORE_INSERTS,
+                      NULL, err,
+                      MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+                      (LPTSTR) &buf, 0, NULL );
+        log(rust_log::ERR, "%s failed with error %ld: %s", fn, err, buf);
+        LocalFree((HLOCAL)buf);
+        I(this, ok);
+    }
+}
+#endif
+
+// Tasks that can still make progress: running plus blocked (dead tasks
+// are excluded).
+size_t
+rust_dom::n_live_tasks()
+{
+    return running_tasks.length() + blocked_tasks.length();
+}
+
+void
+rust_dom::add_task_to_state_vec(ptr_vec<rust_task> *v, rust_task *task)
+{
+    log(rust_log::MEM|rust_log::TASK,
+        "adding task 0x%" PRIxPTR " in state '%s' to vec 0x%" PRIxPTR,
+        (uintptr_t)task, state_vec_name(v), (uintptr_t)v);
+    v->push(task);
+}
+
+
+void
+rust_dom::remove_task_from_state_vec(ptr_vec<rust_task> *v, rust_task *task)
+{
+    log(rust_log::MEM|rust_log::TASK,
+        "removing task 0x%" PRIxPTR " in state '%s' from vec 0x%" PRIxPTR,
+        (uintptr_t)task, state_vec_name(v), (uintptr_t)v);
+    I(this, (*v)[task->idx] == task);
+    v->swapdel(task);
+}
+
+// Human-readable name of a state vector, for logging only.
+const char *
+rust_dom::state_vec_name(ptr_vec<rust_task> *v)
+{
+    if (v == &running_tasks)
+        return "running";
+    if (v == &blocked_tasks)
+        return "blocked";
+    I(this, v == &dead_tasks);
+    return "dead";
+}
+
+// Delete dead tasks that are no longer referenced.  The index only
+// advances when nothing was removed, since swapdel moves the last element
+// into slot i.
+// NOTE(review): the root task is deleted regardless of its refcount —
+// confirm that is intended and that no one holds a live reference to it
+// at this point.
+void
+rust_dom::reap_dead_tasks()
+{
+    for (size_t i = 0; i < dead_tasks.length(); ) {
+        rust_task *t = dead_tasks[i];
+        if (t == root_task || t->refcnt == 0) {
+            I(this, !t->waiting_tasks.length());
+            dead_tasks.swapdel(t);
+            log(rust_log::TASK,
+                "deleting unreferenced dead task 0x%" PRIxPTR, t);
+            delete t;
+            continue;
+        }
+        ++i;
+    }
+}
+
+// Pick a runnable task uniformly at random; NULL when none are runnable.
+rust_task *
+rust_dom::sched()
+{
+    I(this, this);
+    // FIXME: in the face of failing tasks, this is not always right.
+    // I(this, n_live_tasks() > 0);
+    if (running_tasks.length() > 0) {
+        size_t i = rand(&rctx);
+        i %= running_tasks.length();
+        return (rust_task *)running_tasks[i];
+    }
+    log(rust_log::DOM|rust_log::TASK,
+        "no schedulable tasks");
+    return NULL;
+}
+
+// Returns the (ref'ed) cache for `crate`, creating it on first request.
+rust_crate_cache *
+rust_dom::get_cache(rust_crate const *crate) {
+    log(rust_log::CACHE,
+        "looking for crate-cache for crate 0x%" PRIxPTR, crate);
+    rust_crate_cache *cache = NULL;
+    for (size_t i = 0; i < caches.length(); ++i) {
+        rust_crate_cache *c = caches[i];
+        if (c->crate == crate) {
+            cache = c;
+            break;
+        }
+    }
+    if (!cache) {
+        log(rust_log::CACHE,
+            "making new crate-cache for crate 0x%" PRIxPTR, crate);
+        cache = new (this) rust_crate_cache(this, crate);
+        caches.push(cache);
+    }
+    cache->ref();
+    return cache;
+}
+
+
+//
+// Local Variables:
+// mode: C++
+// fill-column: 70;
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// compile-command: "make -k -C .. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
+// End:
+//
diff --git a/src/rt/rust_dwarf.h b/src/rt/rust_dwarf.h
new file mode 100644
index 00000000..8eff3b8c
--- /dev/null
+++ b/src/rt/rust_dwarf.h
@@ -0,0 +1,198 @@
+#ifndef RUST_DWARF_H
+#define RUST_DWARF_H
+
+// DWARF attribute-form encodings (DW_FORM_*), as defined by the DWARF
+// debugging-information format specification. 0x02 is reserved.
+enum
+dw_form
+    {
+        DW_FORM_addr = 0x01,
+        DW_FORM_block2 = 0x03,
+        DW_FORM_block4 = 0x04,
+        DW_FORM_data2 = 0x05,
+        DW_FORM_data4 = 0x06,
+        DW_FORM_data8 = 0x07,
+        DW_FORM_string = 0x08,
+        DW_FORM_block = 0x09,
+        DW_FORM_block1 = 0x0a,
+        DW_FORM_data1 = 0x0b,
+        DW_FORM_flag = 0x0c,
+        DW_FORM_sdata = 0x0d,
+        DW_FORM_strp = 0x0e,
+        DW_FORM_udata = 0x0f,
+        DW_FORM_ref_addr = 0x10,
+        DW_FORM_ref1 = 0x11,
+        DW_FORM_ref2 = 0x12,
+        DW_FORM_ref4 = 0x13,
+        DW_FORM_ref8 = 0x14,
+        DW_FORM_ref_udata = 0x15,
+        DW_FORM_indirect = 0x16
+    };
+
+// DWARF attribute codes (DW_AT_*), per the DWARF specification; gaps
+// in the numbering are values reserved by the standard.
+enum
+dw_at
+    {
+        DW_AT_sibling = 0x01,
+        DW_AT_location = 0x02,
+        DW_AT_name = 0x03,
+        DW_AT_ordering = 0x09,
+        DW_AT_byte_size = 0x0b,
+        DW_AT_bit_offset = 0x0c,
+        DW_AT_bit_size = 0x0d,
+        DW_AT_stmt_list = 0x10,
+        DW_AT_low_pc = 0x11,
+        DW_AT_high_pc = 0x12,
+        DW_AT_language = 0x13,
+        DW_AT_discr = 0x15,
+        DW_AT_discr_value = 0x16,
+        DW_AT_visibility = 0x17,
+        DW_AT_import = 0x18,
+        DW_AT_string_length = 0x19,
+        DW_AT_common_reference = 0x1a,
+        DW_AT_comp_dir = 0x1b,
+        DW_AT_const_value = 0x1c,
+        DW_AT_containing_type = 0x1d,
+        DW_AT_default_value = 0x1e,
+        DW_AT_inline = 0x20,
+        DW_AT_is_optional = 0x21,
+        DW_AT_lower_bound = 0x22,
+        DW_AT_producer = 0x25,
+        DW_AT_prototyped = 0x27,
+        DW_AT_return_addr = 0x2a,
+        DW_AT_start_scope = 0x2c,
+        DW_AT_bit_stride = 0x2e,
+        DW_AT_upper_bound = 0x2f,
+        DW_AT_abstract_origin = 0x31,
+        DW_AT_accessibility = 0x32,
+        DW_AT_address_class = 0x33,
+        DW_AT_artificial = 0x34,
+        DW_AT_base_types = 0x35,
+        DW_AT_calling_convention = 0x36,
+        DW_AT_count = 0x37,
+        DW_AT_data_member_location = 0x38,
+        DW_AT_decl_column = 0x39,
+        DW_AT_decl_file = 0x3a,
+        DW_AT_decl_line = 0x3b,
+        DW_AT_declaration = 0x3c,
+        DW_AT_discr_list = 0x3d,
+        DW_AT_encoding = 0x3e,
+        DW_AT_external = 0x3f,
+        DW_AT_frame_base = 0x40,
+        DW_AT_friend = 0x41,
+        DW_AT_identifier_case = 0x42,
+        DW_AT_macro_info = 0x43,
+        DW_AT_namelist_item = 0x44,
+        DW_AT_priority = 0x45,
+        DW_AT_segment = 0x46,
+        DW_AT_specification = 0x47,
+        DW_AT_static_link = 0x48,
+        DW_AT_type = 0x49,
+        DW_AT_use_location = 0x4a,
+        DW_AT_variable_parameter = 0x4b,
+        DW_AT_virtuality = 0x4c,
+        DW_AT_vtable_elem_location = 0x4d,
+        DW_AT_allocated = 0x4e,
+        DW_AT_associated = 0x4f,
+        DW_AT_data_location = 0x50,
+        DW_AT_byte_stride = 0x51,
+        DW_AT_entry_pc = 0x52,
+        DW_AT_use_UTF8 = 0x53,
+        DW_AT_extension = 0x54,
+        DW_AT_ranges = 0x55,
+        DW_AT_trampoline = 0x56,
+        DW_AT_call_column = 0x57,
+        DW_AT_call_file = 0x58,
+        DW_AT_call_line = 0x59,
+        DW_AT_description = 0x5a,
+        DW_AT_binary_scale = 0x5b,
+        DW_AT_decimal_scale = 0x5c,
+        DW_AT_small = 0x5d,
+        DW_AT_decimal_sign = 0x5e,
+        DW_AT_digit_count = 0x5f,
+        DW_AT_picture_string = 0x60,
+        DW_AT_mutable = 0x61,
+        DW_AT_threads_scaled = 0x62,
+        DW_AT_explicit = 0x63,
+        DW_AT_object_pointer = 0x64,
+        DW_AT_endianity = 0x65,
+        DW_AT_elemental = 0x66,
+        DW_AT_pure = 0x67,
+        DW_AT_recursive = 0x68,
+        DW_AT_lo_user = 0x2000,
+        DW_AT_hi_user = 0x3fff
+};
+
+// DWARF tag codes (DW_TAG_*), per the DWARF specification; gaps in
+// the numbering are values reserved by the standard.
+// Fixed: removed the trailing comma after the last enumerator — it is
+// ill-formed before C++11 and inconsistent with dw_form / dw_at above.
+enum
+dw_tag
+    {
+        DW_TAG_array_type = 0x01,
+        DW_TAG_class_type = 0x02,
+        DW_TAG_entry_point = 0x03,
+        DW_TAG_enumeration_type = 0x04,
+        DW_TAG_formal_parameter = 0x05,
+        DW_TAG_imported_declaration = 0x08,
+        DW_TAG_label = 0x0a,
+        DW_TAG_lexical_block = 0x0b,
+        DW_TAG_member = 0x0d,
+        DW_TAG_pointer_type = 0x0f,
+        DW_TAG_reference_type = 0x10,
+        DW_TAG_compile_unit = 0x11,
+        DW_TAG_string_type = 0x12,
+        DW_TAG_structure_type = 0x13,
+        DW_TAG_subroutine_type = 0x15,
+        DW_TAG_typedef = 0x16,
+        DW_TAG_union_type = 0x17,
+        DW_TAG_unspecified_parameters = 0x18,
+        DW_TAG_variant = 0x19,
+        DW_TAG_common_block = 0x1a,
+        DW_TAG_common_inclusion = 0x1b,
+        DW_TAG_inheritance = 0x1c,
+        DW_TAG_inlined_subroutine = 0x1d,
+        DW_TAG_module = 0x1e,
+        DW_TAG_ptr_to_member_type = 0x1f,
+        DW_TAG_set_type = 0x20,
+        DW_TAG_subrange_type = 0x21,
+        DW_TAG_with_stmt = 0x22,
+        DW_TAG_access_declaration = 0x23,
+        DW_TAG_base_type = 0x24,
+        DW_TAG_catch_block = 0x25,
+        DW_TAG_const_type = 0x26,
+        DW_TAG_constant = 0x27,
+        DW_TAG_enumerator = 0x28,
+        DW_TAG_file_type = 0x29,
+        DW_TAG_friend = 0x2a,
+        DW_TAG_namelist = 0x2b,
+        DW_TAG_namelist_item = 0x2c,
+        DW_TAG_packed_type = 0x2d,
+        DW_TAG_subprogram = 0x2e,
+        DW_TAG_template_type_parameter = 0x2f,
+        DW_TAG_template_value_parameter = 0x30,
+        DW_TAG_thrown_type = 0x31,
+        DW_TAG_try_block = 0x32,
+        DW_TAG_variant_part = 0x33,
+        DW_TAG_variable = 0x34,
+        DW_TAG_volatile_type = 0x35,
+        DW_TAG_dwarf_procedure = 0x36,
+        DW_TAG_restrict_type = 0x37,
+        DW_TAG_interface_type = 0x38,
+        DW_TAG_namespace = 0x39,
+        DW_TAG_imported_module = 0x3a,
+        DW_TAG_unspecified_type = 0x3b,
+        DW_TAG_partial_unit = 0x3c,
+        DW_TAG_imported_unit = 0x3d,
+        DW_TAG_condition = 0x3f,
+        DW_TAG_shared_type = 0x40,
+        DW_TAG_lo_user = 0x4080,
+        DW_TAG_hi_user = 0xffff
+    };
+
+//
+// Local Variables:
+// mode: C++
+// fill-column: 78;
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// compile-command: "make -k -C .. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
+// End:
+//
+
+#endif
diff --git a/src/rt/rust_internal.h b/src/rt/rust_internal.h
new file mode 100644
index 00000000..c393b210
--- /dev/null
+++ b/src/rt/rust_internal.h
@@ -0,0 +1,730 @@
+#ifndef RUST_INTERNAL_H
+#define RUST_INTERNAL_H
+
+#define __STDC_LIMIT_MACROS 1
+#define __STDC_CONSTANT_MACROS 1
+#define __STDC_FORMAT_MACROS 1
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <stdio.h>
+#include <string.h>
+
+#include "rust.h"
+
+#include "rand.h"
+#include "rust_log.h"
+#include "uthash.h"
+
+#if defined(__WIN32__)
+extern "C" {
+#include <windows.h>
+#include <tchar.h>
+#include <wincrypt.h>
+}
+#elif defined(__GNUC__)
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <dlfcn.h>
+#include <pthread.h>
+#include <errno.h>
+#else
+#error "Platform not supported."
+#endif
+
+#ifndef __i386__
+#error "Target CPU not supported."
+#endif
+
+#define I(dom, e) ((e) ? (void)0 : \
+ (dom)->srv->fatal(#e, __FILE__, __LINE__))
+
+struct rust_task;
+struct rust_port;
+class rust_chan;
+struct rust_token;
+struct rust_dom;
+class rust_crate;
+class rust_crate_cache;
+class lockfree_queue;
+
+struct stk_seg;
+struct type_desc;
+struct frame_glue_fns;
+
+// This drives our preemption scheme.
+
+static size_t const TIME_SLICE_IN_MS = 10;
+
+// Every reference counted object should derive from this base class.
+
+// Intrusive reference-count base (CRTP): deref() destroys the object
+// through T's operator delete when the count hits zero. The count is a
+// plain size_t — NOTE(review): not atomic, so this assumes all ref/deref
+// happens on one thread (within a single domain) — confirm.
+template <typename T>
+struct
+rc_base
+{
+    size_t refcnt;
+
+    // Acquire one reference.
+    void ref() {
+        ++refcnt;
+    }
+
+    // Release one reference; destroys the object on the last release.
+    void deref() {
+        if (--refcnt == 0) {
+            delete (T*)this;
+        }
+    }
+
+    rc_base();
+    ~rc_base();
+};
+
+// CRTP mixin for objects whose storage belongs to a rust_dom: delete
+// routes through the owning domain's free() (T must have a `dom` field).
+template <typename T>
+struct
+dom_owned
+{
+    void operator delete(void *ptr) {
+        ((T *)ptr)->dom->free(ptr);
+    }
+};
+
+// CRTP mixin for objects whose storage belongs to a task: delete routes
+// through the owning task's domain free() (T must have a `task` field).
+template <typename T>
+struct
+task_owned
+{
+    void operator delete(void *ptr) {
+        ((T *)ptr)->task->dom->free(ptr);
+    }
+};
+
+
+// Helper class used regularly elsewhere.
+
+// Growable vector of raw pointers whose storage comes from a rust_dom.
+// swapdel() appears to remove an element by replacing it with another
+// (order is not preserved — see reap_dead_tasks' non-advancing loop).
+template <typename T>
+class
+ptr_vec : public dom_owned<ptr_vec<T> >
+{
+    static const size_t INIT_SIZE = 8;
+
+    rust_dom *dom;      // owning domain (allocator)
+    size_t alloc;       // capacity of data
+    size_t fill;        // number of live elements
+    T **data;
+
+public:
+    ptr_vec(rust_dom *dom);
+    ~ptr_vec();
+
+    // Number of elements currently stored.
+    size_t length() {
+        return fill;
+    }
+
+    T *& operator[](size_t offset);
+    void push(T *p);
+    T *pop();
+    void trim(size_t fill);
+    void swapdel(T* p);
+};
+
+// A domain: owns a set of tasks (partitioned into running / blocked /
+// dead state vectors), schedules among them, and provides their
+// allocator, logger and crate caches. The leading field layout is
+// shared with compiler-generated code.
+struct
+rust_dom
+{
+    // Fields known to the compiler:
+    uintptr_t interrupt_flag;
+
+    // Fields known only by the runtime:
+
+    // NB: the root crate must remain in memory until the root of the
+    // tree of domains exits. All domains within this tree have a
+    // copy of this root_crate value and use it for finding utility
+    // glue.
+    rust_crate const *root_crate;
+    rust_log _log;
+    rust_srv *srv;
+    // uint32_t logbits;
+    ptr_vec<rust_task> running_tasks;
+    ptr_vec<rust_task> blocked_tasks;
+    ptr_vec<rust_task> dead_tasks;
+    ptr_vec<rust_crate_cache> caches;
+    randctx rctx;              // RNG state used by sched()
+    rust_task *root_task;
+    rust_task *curr_task;
+    int rval;                  // domain exit value
+    lockfree_queue *incoming; // incoming messages from other threads
+
+#ifndef __WIN32__
+    pthread_attr_t attr;
+#endif
+
+    rust_dom(rust_srv *srv, rust_crate const *root_crate);
+    ~rust_dom();
+
+    void activate(rust_task *task);
+    void log(uint32_t logbit, char const *fmt, ...);
+    rust_log & get_log();
+    void logptr(char const *msg, uintptr_t ptrval);
+    template<typename T>
+    void logptr(char const *msg, T* ptrval);
+    void fail();
+    // Allocation interface used throughout the runtime.
+    void *malloc(size_t sz);
+    void *calloc(size_t sz);
+    void *realloc(void *data, size_t sz);
+    void free(void *p);
+
+#ifdef __WIN32__
+    void win32_require(LPCTSTR fn, BOOL ok);
+#endif
+
+    rust_crate_cache *get_cache(rust_crate const *crate);
+    size_t n_live_tasks();
+    // Task state-vector bookkeeping (see rust_task::transition).
+    void add_task_to_state_vec(ptr_vec<rust_task> *v, rust_task *task);
+    void remove_task_from_state_vec(ptr_vec<rust_task> *v, rust_task *task);
+    const char *state_vec_name(ptr_vec<rust_task> *v);
+
+    void reap_dead_tasks();
+    rust_task *sched();
+};
+
+// Placement new, plus domain-allocating forms: `new (dom) T(...)` draws
+// storage from dom->malloc; release goes through dom->free (see the
+// dom_owned / task_owned mixins).
+inline void *operator new(size_t sz, void *mem) {
+    return mem;
+}
+
+inline void *operator new(size_t sz, rust_dom *dom) {
+    return dom->malloc(sz);
+}
+
+inline void *operator new[](size_t sz, rust_dom *dom) {
+    return dom->malloc(sz);
+}
+
+inline void *operator new(size_t sz, rust_dom &dom) {
+    return dom.malloc(sz);
+}
+
+inline void *operator new[](size_t sz, rust_dom &dom) {
+    return dom.malloc(sz);
+}
+
+struct
+rust_timer
+{
+    // FIXME: This will probably eventually need replacement
+    // with something more sophisticated and integrated with
+    // an IO event-handling library, when we have such a thing.
+    // For now it's just the most basic "thread that can interrupt
+    // its associated domain-thread" device, so that we have
+    // *some* form of task-preemption.
+    rust_dom &dom;
+    uintptr_t exit_flag;   // set to tell the timer thread to stop
+
+#if defined(__WIN32__)
+    HANDLE thread;
+#else
+    pthread_attr_t attr;
+    pthread_t thread;
+#endif
+
+    rust_timer(rust_dom &dom);
+    ~rust_timer();
+};
+
+#include "rust_util.h"
+
+// Crates.
+
+// Convert a crate-relative offset (stored in the crate image as a
+// pointer-typed field holding a ptrdiff_t) into an absolute pointer.
+template<typename T> T*
+crate_rel(rust_crate const *crate, T *t) {
+    return (T*)(((uintptr_t)crate) + ((ptrdiff_t)t));
+}
+
+template<typename T> T const*
+crate_rel(rust_crate const *crate, T const *t) {
+    return (T const*)(((uintptr_t)crate) + ((ptrdiff_t)t));
+}
+
+typedef void CDECL (*activate_glue_ty)(rust_task *);
+
+// Read-only descriptor of a compiled crate image. All *_off fields are
+// offsets relative to `this` (resolved via crate_rel / the accessors).
+class
+rust_crate
+{
+    // The following fields are emitted by the compiler for the static
+    // rust_crate object inside each compiled crate.
+
+    ptrdiff_t image_base_off;     // (Loaded image base) - this.
+    uintptr_t self_addr;          // Un-relocated addres of 'this'.
+
+    ptrdiff_t debug_abbrev_off;   // Offset from this to .debug_abbrev.
+    size_t debug_abbrev_sz;       // Size of .debug_abbrev.
+
+    ptrdiff_t debug_info_off;     // Offset from this to .debug_info.
+    size_t debug_info_sz;         // Size of .debug_info.
+
+    // Offsets to the compiler-emitted glue entry points.
+    ptrdiff_t activate_glue_off;
+    ptrdiff_t exit_task_glue_off;
+    ptrdiff_t unwind_glue_off;
+    ptrdiff_t yield_glue_off;
+
+public:
+
+    size_t n_rust_syms;
+    size_t n_c_syms;
+    size_t n_libs;
+
+    // Crates are immutable, constructed by the compiler.
+
+    uintptr_t get_image_base() const;
+    ptrdiff_t get_relocation_diff() const;
+    activate_glue_ty get_activate_glue() const;
+    uintptr_t get_exit_task_glue() const;
+    uintptr_t get_unwind_glue() const;
+    uintptr_t get_yield_glue() const;
+    // A [base, lim) window over a section of the crate image.
+    struct mem_area
+    {
+        rust_dom *dom;
+        uintptr_t base;
+        uintptr_t lim;
+        mem_area(rust_dom *dom, uintptr_t pos, size_t sz);
+    };
+
+    mem_area get_debug_info(rust_dom *dom) const;
+    mem_area get_debug_abbrev(rust_dom *dom) const;
+};
+
+
+// Runtime type descriptor: size/alignment plus offsets to the glue
+// routines the compiler emits for copying, dropping, freeing and
+// marking values of the type. Hash-linked into a crate cache via uthash.
+struct type_desc {
+    // First part of type_desc is known to compiler.
+    // first_param = &descs[1] if dynamic, null if static.
+    const type_desc **first_param;
+    size_t size;
+    size_t align;
+    uintptr_t copy_glue_off;
+    uintptr_t drop_glue_off;
+    uintptr_t free_glue_off;
+    uintptr_t mark_glue_off;      // For GC.
+    uintptr_t obj_drop_glue_off;  // For custom destructors.
+
+    // Residual fields past here are known only to runtime.
+    UT_hash_handle hh;            // uthash chaining handle
+    size_t n_descs;
+    const type_desc *descs[];     // trailing parameter descriptors
+};
+
+// Per-crate cache of resolved dynamic libraries, C symbols, Rust
+// symbols and type descriptors; reference counted and domain-owned.
+class
+rust_crate_cache : public dom_owned<rust_crate_cache>,
+                   public rc_base<rust_crate_cache>
+{
+public:
+    // A loaded dynamic library handle.
+    class lib :
+        public rc_base<lib>, public dom_owned<lib>
+    {
+        uintptr_t handle;
+    public:
+        rust_dom *dom;
+        lib(rust_dom *dom, char const *name);
+        uintptr_t get_handle();
+        ~lib();
+    };
+
+    // A C symbol resolved out of a lib.
+    class c_sym :
+        public rc_base<c_sym>, public dom_owned<c_sym>
+    {
+        uintptr_t val;
+        lib *library;
+    public:
+        rust_dom *dom;
+        c_sym(rust_dom *dom, lib *library, char const *name);
+        uintptr_t get_val();
+        ~c_sym();
+    };
+
+    // A Rust symbol resolved through a crate symbol plus a path.
+    class rust_sym :
+        public rc_base<rust_sym>, public dom_owned<rust_sym>
+    {
+        uintptr_t val;
+        c_sym *crate_sym;
+    public:
+        rust_dom *dom;
+        rust_sym(rust_dom *dom, rust_crate const *curr_crate,
+                 c_sym *crate_sym, char const **path);
+        uintptr_t get_val();
+        ~rust_sym();
+    };
+
+    // Indexed lookups; `n` selects a slot in the arrays below.
+    lib *get_lib(size_t n, char const *name);
+    c_sym *get_c_sym(size_t n, lib *library, char const *name);
+    rust_sym *get_rust_sym(size_t n,
+                           rust_dom *dom,
+                           rust_crate const *curr_crate,
+                           c_sym *crate_sym,
+                           char const **path);
+    type_desc *get_type_desc(size_t size,
+                             size_t align,
+                             size_t n_descs,
+                             type_desc const **descs);
+
+private:
+
+    rust_sym **rust_syms;
+    c_sym **c_syms;
+    lib **libs;
+    type_desc *type_descs;
+
+public:
+
+    rust_crate const *crate;
+    rust_dom *dom;
+    size_t idx;
+
+    rust_crate_cache(rust_dom *dom,
+                     rust_crate const *crate);
+    ~rust_crate_cache();
+    void flush();
+};
+
+#include "rust_dwarf.h"
+
+// Reader over a crate's DWARF debug sections (.debug_abbrev and
+// .debug_info), providing abbrev and DIE traversal.
+class
+rust_crate_reader
+{
+    // Bounds-checked cursor over a mem_area; all reads set ok=false
+    // instead of faulting when they would leave [base, lim).
+    struct mem_reader
+    {
+        rust_crate::mem_area &mem;
+        bool ok;
+        uintptr_t pos;
+
+        bool is_ok();
+        bool at_end();
+        void fail();
+        void reset();
+        mem_reader(rust_crate::mem_area &m);
+        size_t tell_abs();
+        size_t tell_off();
+        void seek_abs(uintptr_t p);
+        void seek_off(uintptr_t p);
+
+        // Read a raw T at the cursor, advancing past it.
+        // NOTE(review): `pos + sizeof(T)` could wrap for a pos near the
+        // top of the address space — confirm mem areas can't sit there.
+        template<typename T>
+        void get(T &out) {
+            if (pos < mem.base
+                || pos >= mem.lim
+                || pos + sizeof(T) > mem.lim)
+                ok = false;
+            if (!ok)
+                return;
+            out = *((T*)(pos));
+            pos += sizeof(T);
+            ok &= !at_end();
+            I(mem.dom, at_end() || (mem.base <= pos && pos < mem.lim));
+        }
+
+        // Read a variable-length unsigned integer.
+        // NOTE(review): this accumulates high-groups-first (out <<= 7),
+        // whereas standard DWARF ULEB128 stores the least significant
+        // group first — verify this matches what the emitter writes.
+        template<typename T>
+        void get_uleb(T &out) {
+            out = T(0);
+            for (size_t i = 0; i < sizeof(T) && ok; ++i) {
+                uint8_t byte;
+                get(byte);
+                out <<= 7;
+                out |= byte & 0x7f;
+                if (!(byte & 0x80))
+                    break;
+            }
+            I(mem.dom, at_end() || (mem.base <= pos && pos < mem.lim));
+        }
+
+        // Skip past a value of type T without reading it.
+        template<typename T>
+        void adv_sizeof(T &) {
+            adv(sizeof(T));
+        }
+
+        bool adv_zstr(size_t sz);
+        bool get_zstr(char const *&c, size_t &sz);
+        void adv(size_t amt);
+    };
+
+    // One entry parsed from .debug_abbrev.
+    struct
+    abbrev : dom_owned<abbrev>
+    {
+        rust_dom *dom;
+        uintptr_t body_off;
+        size_t body_sz;
+        uintptr_t tag;
+        uint8_t has_children;
+        size_t idx;
+        abbrev(rust_dom *dom, uintptr_t body_off, size_t body_sz,
+               uintptr_t tag, uint8_t has_children);
+    };
+
+    // Lazily materializes abbrevs from the .debug_abbrev section.
+    class
+    abbrev_reader : public mem_reader
+    {
+        ptr_vec<abbrev> abbrevs;
+    public:
+        abbrev_reader(rust_crate::mem_area &abbrev_mem);
+        abbrev *get_abbrev(size_t i);
+        bool step_attr_form_pair(uintptr_t &attr, uintptr_t &form);
+        ~abbrev_reader();
+    };
+
+    rust_dom *dom;
+    size_t idx;
+    rust_crate const *crate;
+
+    rust_crate::mem_area abbrev_mem;
+    abbrev_reader abbrevs;
+
+    rust_crate::mem_area die_mem;
+
+public:
+
+    // A decoded DIE attribute: either a string view or a number,
+    // discriminated by `form`.
+    struct
+    attr
+    {
+        dw_form form;
+        dw_at at;
+        union {
+            struct {
+                char const *s;
+                size_t sz;
+            } str;
+            uintptr_t num;
+        } val;
+
+        bool is_numeric() const;
+        bool is_string() const;
+        size_t get_ssz(rust_dom *dom) const;
+        char const *get_str(rust_dom *dom) const;
+        uintptr_t get_num(rust_dom *dom) const;
+        bool is_unknown() const;
+    };
+
+    struct die_reader;
+
+    // A debugging-information entry at offset `off`, interpreted via
+    // its abbreviation `ab`.
+    struct
+    die
+    {
+        die_reader *rdr;
+        uintptr_t off;
+        abbrev *ab;
+        bool using_rdr;
+
+        die(die_reader *rdr, uintptr_t off);
+        bool is_null() const;
+        bool has_children() const;
+        dw_tag tag() const;
+        bool start_attrs() const;
+        bool step_attr(attr &a) const;
+        bool find_str_attr(dw_at at, char const *&c);
+        bool find_num_attr(dw_at at, uintptr_t &n);
+        bool is_transparent();
+        bool find_child_by_name(char const *c, die &child,
+                                bool exact=false);
+        bool find_child_by_tag(dw_tag tag, die &child);
+        die next() const;
+        die next_sibling() const;
+    };
+
+    // RAII guard marking a die_reader as in use for one traversal.
+    struct
+    rdr_sess
+    {
+        die_reader *rdr;
+        rdr_sess(die_reader *rdr);
+        ~rdr_sess();
+    };
+
+    // Reader over .debug_info: parses the compile-unit header fields
+    // below and yields DIEs.
+    struct
+    die_reader : public mem_reader
+    {
+        abbrev_reader &abbrevs;
+        uint32_t cu_unit_length;
+        uintptr_t cu_base;
+        uint16_t dwarf_vers;
+        uint32_t cu_abbrev_off;
+        uint8_t sizeof_addr;
+        bool in_use;
+
+        die first_die();
+        void dump();
+        die_reader(rust_crate::mem_area &die_mem,
+                   abbrev_reader &abbrevs);
+        ~die_reader();
+    };
+    die_reader dies;
+    rust_crate_reader(rust_dom *dom, rust_crate const *crate);
+};
+
+
+// A cond(ition) is something we can block on. This can be a channel
+// (writing), a port (reading) or a task (waiting).
+
+// Empty tag type: anything a task can block on derives from this.
+struct
+rust_cond
+{
+};
+
+// An alarm can be put into a wait queue and the task will be notified
+// when the wait queue is flushed.
+
+struct
+rust_alarm
+{
+    rust_task *receiver;  // task to notify when the queue flushes
+    size_t idx;           // position within the wait queue
+
+    rust_alarm(rust_task *receiver);
+};
+
+
+typedef ptr_vec<rust_alarm> rust_wait_queue;
+
+
+// A heap-allocated task stack segment: data[] is the usable region and
+// limit points one past its end (see new_stk in rust_task.cpp).
+struct stk_seg {
+    unsigned int valgrind_id;  // from VALGRIND_STACK_REGISTER
+    uintptr_t limit;
+    uint8_t data[];
+};
+
+// Per-frame glue table; presumably offsets to the frame's mark / drop /
+// reloc glue routines — see the commented-out walker in ~rust_task.
+struct frame_glue_fns {
+    uintptr_t mark_glue_off;
+    uintptr_t drop_glue_off;
+    uintptr_t reloc_glue_off;
+};
+
+// A task: its stack chain, saved stack pointers, scheduling state
+// (which of the domain's state vectors it sits in) and communication
+// hooks. The leading field layout is shared with compiled code.
+struct
+rust_task : public rc_base<rust_task>,
+            public dom_owned<rust_task>,
+            public rust_cond
+{
+    // Fields known to the compiler.
+    stk_seg *stk;
+    uintptr_t runtime_sp;      // Runtime sp while task running.
+    uintptr_t rust_sp;         // Saved sp when not running.
+    uintptr_t gc_alloc_chain;  // Linked list of GC allocations.
+    rust_dom *dom;
+    rust_crate_cache *cache;
+
+    // Fields known only to the runtime.
+    ptr_vec<rust_task> *state; // which dom state vector we are in
+    rust_cond *cond;           // what we are blocked on, if blocked
+    uintptr_t* dptr;           // Rendezvous pointer for send/recv.
+    rust_task *spawner;        // Parent-link.
+    size_t idx;                // index within *state
+
+    // Wait queue for tasks waiting for this task.
+    rust_wait_queue waiting_tasks;
+    rust_alarm alarm;
+
+    rust_task(rust_dom *dom,
+              rust_task *spawner);
+    ~rust_task();
+
+    void start(uintptr_t exit_task_glue,
+               uintptr_t spawnee_fn,
+               uintptr_t args,
+               size_t callsz);
+    void grow(size_t n_frame_bytes);
+    // State predicates: exactly one holds at a time.
+    bool running();
+    bool blocked();
+    bool blocked_on(rust_cond *cond);
+    bool dead();
+
+    const char *state_str();
+    void transition(ptr_vec<rust_task> *svec, ptr_vec<rust_task> *dvec);
+
+    void block(rust_cond *on);
+    void wakeup(rust_cond *from);
+    void die();
+    void unblock();
+
+    void check_active() { I(dom, dom->curr_task == this); }
+    void check_suspended() { I(dom, dom->curr_task != this); }
+
+    // Swap in some glue code to run when we have returned to the
+    // task's context (assuming we're the active task).
+    void run_after_return(size_t nargs, uintptr_t glue);
+
+    // Swap in some glue code to run when we're next activated
+    // (assuming we're the suspended task).
+    void run_on_resume(uintptr_t glue);
+
+    // Save callee-saved registers and return to the main loop.
+    void yield(size_t nargs);
+
+    // Fail this task (assuming caller-on-stack is different task).
+    void kill();
+
+    // Fail self, assuming caller-on-stack is this task.
+    void fail(size_t nargs);
+
+    // Notify tasks waiting for us that we are about to die.
+    void notify_waiting_tasks();
+
+    uintptr_t get_fp();
+    uintptr_t get_previous_fp(uintptr_t fp);
+    frame_glue_fns *get_frame_glue_fns(uintptr_t fp);
+    rust_crate_cache * get_crate_cache(rust_crate const *curr_crate);
+};
+
+// Receiving endpoint of a channel; a task can block on it (rust_cond).
+struct rust_port : public rc_base<rust_port>,
+                   public task_owned<rust_port>,
+                   public rust_cond {
+    rust_task *task;             // owning (receiving) task
+    size_t unit_sz;              // size of one transferred unit
+    ptr_vec<rust_token> writers; // tokens of senders blocked on us
+    ptr_vec<rust_chan> chans;    // channels attached to this port
+
+    rust_port(rust_task *task, size_t unit_sz);
+    ~rust_port();
+};
+
+// A sender's place in a port's writer queue; blockable (rust_cond).
+struct rust_token : public rust_cond {
+    rust_chan *chan;   // Link back to the channel this token belongs to
+    size_t idx;        // Index into port->writers.
+    bool submitted;    // Whether token is in a port->writers.
+
+    rust_token(rust_chan *chan);
+    ~rust_token();
+
+    bool pending() const;
+    void submit();
+    void withdraw();
+};
+
+
+// Fixed-unit circular buffer used for buffered channel data.
+struct circ_buf : public dom_owned<circ_buf> {
+    static const size_t INIT_CIRC_BUF_UNITS = 8;
+    static const size_t MAX_CIRC_BUF_SIZE = 1 << 24;
+
+    rust_dom *dom;     // owning domain (allocator)
+    size_t alloc;      // allocated byte size of data
+    size_t unit_sz;    // size of one unit
+    size_t next;       // read position (byte offset)
+    size_t unread;     // bytes buffered but not yet consumed
+    uint8_t *data;
+
+    circ_buf(rust_dom *dom, size_t unit_sz);
+    ~circ_buf();
+
+    void transfer(void *dst);
+    void push(void *src);
+    void shift(void *dst);
+};
+
+#include "rust_chan.h"
+
+int
+rust_main_loop(rust_dom *dom);
+
+//
+// Local Variables:
+// mode: C++
+// fill-column: 78;
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// compile-command: "make -k -C .. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
+// End:
+//
+
+#endif
diff --git a/src/rt/rust_log.cpp b/src/rt/rust_log.cpp
new file mode 100644
index 00000000..102a2623
--- /dev/null
+++ b/src/rt/rust_log.cpp
@@ -0,0 +1,117 @@
+/*
+ * Logging infrastructure that aims to support multi-threading, indentation
+ * and ansi colors.
+ */
+
+#include "rust_internal.h"
+
+// Build the enabled-category bitmask from the RUST_LOG environment
+// variable; defaults to user logging plus errors when it is unset.
+static uint32_t read_type_bit_mask() {
+    char *env_str = getenv("RUST_LOG");
+    if (!env_str) {
+        return rust_log::ULOG | rust_log::ERR;
+    }
+    static struct { const char *name; uint32_t bit; } const words[] = {
+        { "err",   rust_log::ERR },
+        { "mem",   rust_log::MEM },
+        { "comm",  rust_log::COMM },
+        { "task",  rust_log::TASK },
+        { "up",    rust_log::UPCALL },
+        { "dom",   rust_log::DOM },
+        { "ulog",  rust_log::ULOG },
+        { "trace", rust_log::TRACE },
+        { "dwarf", rust_log::DWARF },
+        { "cache", rust_log::CACHE },
+        { "timer", rust_log::TIMER },
+        { "all",   rust_log::ALL },
+    };
+    uint32_t bits = 0;
+    for (size_t i = 0; i < sizeof(words) / sizeof(words[0]); ++i) {
+        if (strstr(env_str, words[i].name)) {
+            bits |= words[i].bit;
+        }
+    }
+    return bits;
+}
+
+// Color-code the major log categories so interleaved output is
+// scannable; everything else renders white.
+rust_log::ansi_color rust_log::get_type_color(log_type type) {
+    if (type == ERR)
+        return rust_log::RED;
+    if (type == UPCALL)
+        return rust_log::GREEN;
+    if (type == COMM)
+        return rust_log::MAGENTA;
+    if (type == DOM || type == TASK)
+        return rust_log::LIGHTTEAL;
+    if (type == MEM)
+        return rust_log::YELLOW;
+    return rust_log::WHITE;
+}
+
+// ANSI SGR escape suffixes, indexed by rust_log::ansi_color
+// (BLACK..LIGHTTEAL). NOTE(review): YELLOW and LIGHTYELLOW both map to
+// "[33m" — possibly intentional, confirm.
+static const char * _foreground_colors[] = { "[30m", "[1;30m", "[37m",
+                                             "[31m", "[1;31m", "[32m",
+                                             "[1;32m", "[33m", "[33m",
+                                             "[34m", "[1;34m", "[35m",
+                                             "[1;35m", "[36m", "[1;36m" };
+// Reads RUST_LOG (category mask) and RUST_COLOR_LOG (color toggle)
+// from the environment at construction time.
+rust_log::rust_log(rust_srv *srv, rust_dom *dom) :
+    _srv(srv), _dom(dom), _type_bit_mask(read_type_bit_mask()),
+    _use_colors(getenv("RUST_COLOR_LOG")), _indent(0) {
+}
+
+// Nothing to release; the service and domain are not owned.
+rust_log::~rust_log() {
+
+}
+
+// Emit one log line: a (optionally color-hashed) domain-pointer prefix,
+// the current indentation as tabs, then the message, forwarded to the
+// service's log sink. Output is truncated at 512 bytes, never overflown.
+void rust_log::trace_ln(char *message) {
+    char buffer[512];
+    if (_use_colors) {
+        // Hash the domain pointer into a stable per-domain color.
+        snprintf(buffer, sizeof(buffer), "\x1b%s0x%08" PRIxPTR "\x1b[0m: ",
+                 _foreground_colors[1 + ((uintptr_t) _dom % 2687 % (LIGHTTEAL
+                     - 1))], (uintptr_t) _dom);
+    } else {
+        snprintf(buffer, sizeof(buffer), "0x%08" PRIxPTR ": ",
+                 (uintptr_t) _dom);
+    }
+
+    for (uint32_t i = 0; i < _indent; i++) {
+        strncat(buffer, "\t", sizeof(buffer) - strlen(buffer) - 1);
+    }
+    strncat(buffer, message, sizeof(buffer) - strlen(buffer) - 1);
+    _srv->log(buffer);
+}
+
+/**
+ * Traces a log message if the specified logging type is not filtered.
+ */
+void rust_log::trace_ln(uint32_t type_bits, char *message) {
+ trace_ln(get_type_color((rust_log::log_type) type_bits), type_bits,
+ message);
+}
+
+/**
+ * Traces a log message using the specified ANSI color code.
+ */
+void rust_log::trace_ln(ansi_color color, uint32_t type_bits, char *message) {
+ if (is_tracing(type_bits)) {
+ if (_use_colors) {
+ char buffer[512];
+ snprintf(buffer, sizeof(buffer), "\x1b%s%s\x1b[0m",
+ _foreground_colors[color], message);
+ trace_ln(buffer);
+ } else {
+ trace_ln(message);
+ }
+ }
+}
+
+// True when any of the requested category bits is enabled in the mask.
+bool rust_log::is_tracing(uint32_t type_bits) {
+    return (type_bits & _type_bit_mask) != 0;
+}
+
+// Increase the per-line tab indentation by one level.
+void rust_log::indent() {
+    _indent++;
+}
+
+// Decrease the indentation level. NOTE(review): unbalanced calls wrap
+// the unsigned counter around — confirm callers always pair these.
+void rust_log::outdent() {
+    _indent--;
+}
+
+// Force the indentation to an absolute level.
+void rust_log::reset_indent(uint32_t indent) {
+    _indent = indent;
+}
diff --git a/src/rt/rust_log.h b/src/rt/rust_log.h
new file mode 100644
index 00000000..b0c5fbec
--- /dev/null
+++ b/src/rt/rust_log.h
@@ -0,0 +1,59 @@
+#ifndef RUST_LOG_H_
+#define RUST_LOG_H_
+
+class rust_dom;
+
+// Per-domain logger: filters by category bitmask (RUST_LOG), optionally
+// colorizes (RUST_COLOR_LOG), and maintains a tab-indentation level.
+class rust_log {
+    rust_srv *_srv;            // sink that actually emits lines
+    rust_dom *_dom;            // domain identity printed as prefix
+    uint32_t _type_bit_mask;   // enabled log_type bits
+    bool _use_colors;
+    uint32_t _indent;
+    void trace_ln(char *message);
+public:
+    rust_log(rust_srv *srv, rust_dom *dom);
+    virtual ~rust_log();
+
+    // Indexes into the _foreground_colors table in rust_log.cpp.
+    enum ansi_color {
+        BLACK,
+        GRAY,
+        WHITE,
+        RED,
+        LIGHTRED,
+        GREEN,
+        LIGHTGREEN,
+        YELLOW,
+        LIGHTYELLOW,
+        BLUE,
+        LIGHTBLUE,
+        MAGENTA,
+        LIGHTMAGENTA,
+        TEAL,
+        LIGHTTEAL
+    };
+
+    // Log categories; values are single bits so they can be OR-ed.
+    enum log_type {
+        ERR = 0x1,
+        MEM = 0x2,
+        COMM = 0x4,
+        TASK = 0x8,
+        DOM = 0x10,
+        ULOG = 0x20,
+        TRACE = 0x40,
+        DWARF = 0x80,
+        CACHE = 0x100,
+        UPCALL = 0x200,
+        TIMER = 0x400,
+        ALL = 0xffffffff
+    };
+
+    void indent();
+    void outdent();
+    void reset_indent(uint32_t indent);
+    void trace_ln(uint32_t type_bits, char *message);
+    void trace_ln(ansi_color color, uint32_t type_bits, char *message);
+    bool is_tracing(uint32_t type_bits);
+    static ansi_color get_type_color(log_type type);
+};
+
+#endif /* RUST_LOG_H_ */
diff --git a/src/rt/rust_task.cpp b/src/rt/rust_task.cpp
new file mode 100644
index 00000000..beba11a0
--- /dev/null
+++ b/src/rt/rust_task.cpp
@@ -0,0 +1,474 @@
+
+#include "rust_internal.h"
+
+#include "valgrind.h"
+#include "memcheck.h"
+
+// Stacks
+
+static size_t const min_stk_bytes = 0x300;
+
+// Task stack segments. Heap allocated and chained together.
+
+// Allocate a stack segment with at least `minsz` usable bytes (but
+// never less than min_stk_bytes) and register it with valgrind.
+static stk_seg*
+new_stk(rust_dom *dom, size_t minsz)
+{
+    if (minsz < min_stk_bytes)
+        minsz = min_stk_bytes;
+    size_t sz = sizeof(stk_seg) + minsz;
+    stk_seg *stk = (stk_seg *)dom->malloc(sz);
+    dom->logptr("new stk", (uintptr_t)stk);
+    // Only the header is zeroed; the stack area itself stays raw.
+    memset(stk, 0, sizeof(stk_seg));
+    stk->limit = (uintptr_t) &stk->data[minsz];
+    dom->logptr("stk limit", stk->limit);
+    stk->valgrind_id =
+        VALGRIND_STACK_REGISTER(&stk->data[0],
+                                &stk->data[minsz]);
+    return stk;
+}
+
+// Unregister a stack segment from valgrind and return it to the domain.
+static void
+del_stk(rust_dom *dom, stk_seg *stk)
+{
+    VALGRIND_STACK_DEREGISTER(stk->valgrind_id);
+    dom->logptr("freeing stk segment", (uintptr_t)stk);
+    dom->free(stk);
+}
+
+// Tasks
+
+// FIXME (issue #31): ifdef by platform. This is getting absurdly
+// x86-specific.
+
+size_t const n_callee_saves = 4;
+size_t const callee_save_fp = 0;
+
+// Round a stack pointer down to the nearest 16-byte boundary.
+static uintptr_t
+align_down(uintptr_t sp)
+{
+    // There is no platform we care about that needs more than a
+    // 16-byte alignment.
+    return sp & ~(16 - 1);
+}
+
+
+// Construct a task with a fresh minimum-size stack; it starts life in
+// the domain's running_tasks vector (actual enqueue happens in start()).
+// Note: rust_sp is initialized from stk->limit, which relies on stk
+// being declared (and therefore initialized) before rust_sp.
+rust_task::rust_task(rust_dom *dom, rust_task *spawner) :
+    stk(new_stk(dom, 0)),
+    runtime_sp(0),
+    rust_sp(stk->limit),
+    gc_alloc_chain(0),
+    dom(dom),
+    cache(NULL),
+    state(&dom->running_tasks),
+    cond(NULL),
+    dptr(0),
+    spawner(spawner),
+    idx(0),
+    waiting_tasks(dom),
+    alarm(this)
+{
+    dom->logptr("new task", (uintptr_t)this);
+}
+
+// Destroy a task: frees its stack segment and drops its crate-cache
+// reference. Only legal when unreferenced (or when it is the root task
+// holding its final domain reference).
+rust_task::~rust_task()
+{
+    dom->log(rust_log::MEM|rust_log::TASK,
+             "~rust_task 0x%" PRIxPTR ", refcnt=%d",
+             (uintptr_t)this, refcnt);
+
+    /*
+      for (uintptr_t fp = get_fp(); fp; fp = get_previous_fp(fp)) {
+          frame_glue_fns *glue_fns = get_frame_glue_fns(fp);
+          dom->log(rust_log::MEM|rust_log::TASK,
+                   "~rust_task, frame fp=0x%" PRIxPTR ", glue_fns=0x%" PRIxPTR,
+                   fp, glue_fns);
+          if (glue_fns) {
+              dom->log(rust_log::MEM|rust_log::TASK,
+                       "~rust_task, mark_glue=0x%" PRIxPTR,
+                       glue_fns->mark_glue);
+              dom->log(rust_log::MEM|rust_log::TASK,
+                       "~rust_task, drop_glue=0x%" PRIxPTR,
+                       glue_fns->drop_glue);
+              dom->log(rust_log::MEM|rust_log::TASK,
+                       "~rust_task, reloc_glue=0x%" PRIxPTR,
+                       glue_fns->reloc_glue);
+          }
+      }
+    */
+
+    /* FIXME: tighten this up, there are some more
+       assertions that hold at task-lifecycle events. */
+    I(dom, refcnt == 0 ||
+      (refcnt == 1 && this == dom->root_task));
+
+    del_stk(dom, stk);
+    if (cache)
+        cache->deref();
+}
+
+// Synthesize the task's initial stack image — a fake "exit-task" frame
+// that the spawnee will return into, plus the spawnee's own starting
+// frame — then make the task runnable.
+void
+rust_task::start(uintptr_t exit_task_glue,
+                 uintptr_t spawnee_fn,
+                 uintptr_t args,
+                 size_t callsz)
+{
+    dom->logptr("exit-task glue", exit_task_glue);
+    dom->logptr("from spawnee", spawnee_fn);
+
+    // Set sp to last uintptr_t-sized cell of segment and align down.
+    rust_sp -= sizeof(uintptr_t);
+    rust_sp = align_down(rust_sp);
+
+    // Begin synthesizing frames. There are two: a "fully formed"
+    // exit-task frame at the top of the stack -- that pretends to be
+    // mid-execution -- and a just-starting frame beneath it that
+    // starts executing the first instruction of the spawnee. The
+    // spawnee *thinks* it was called by the exit-task frame above
+    // it. It wasn't; we put that fake frame in place here, but the
+    // illusion is enough for the spawnee to return to the exit-task
+    // frame when it's done, and exit.
+    uintptr_t *spp = (uintptr_t *)rust_sp;
+
+    // The exit_task_glue frame we synthesize above the frame we activate:
+    *spp-- = (uintptr_t) this;   // task
+    *spp-- = (uintptr_t) 0;      // output
+    *spp-- = (uintptr_t) 0;      // retpc
+    for (size_t j = 0; j < n_callee_saves; ++j) {
+        *spp-- = 0;
+    }
+
+    // We want 'frame_base' to point to the last callee-save in this
+    // (exit-task) frame, because we're going to inject this
+    // frame-pointer into the callee-save frame pointer value in the
+    // *next* (spawnee) frame. A cheap trick, but this means the
+    // spawnee frame will restore the proper frame pointer of the glue
+    // frame as it runs its epilogue.
+    uintptr_t frame_base = (uintptr_t) (spp+1);
+
+    *spp-- = (uintptr_t) dom->root_crate;  // crate ptr
+    *spp-- = (uintptr_t) 0;                // frame_glue_fns
+
+    // Copy args from spawner to spawnee.
+    if (args)  {
+        uintptr_t *src = (uintptr_t *)args;
+        src += 1;                  // spawn-call output slot
+        src += 1;                  // spawn-call task slot
+        // Memcpy all but the task and output pointers
+        callsz -= (2 * sizeof(uintptr_t));
+        spp = (uintptr_t*) (((uintptr_t)spp) - callsz);
+        memcpy(spp, src, callsz);
+
+        // Move sp down to point to task cell.
+        spp--;
+    } else {
+        // We're at root, starting up.
+        I(dom, callsz==0);
+    }
+
+    // The *implicit* incoming args to the spawnee frame we're
+    // activating:
+
+    *spp-- = (uintptr_t) this;            // task
+    *spp-- = (uintptr_t) 0;               // output addr
+    *spp-- = (uintptr_t) exit_task_glue;  // retpc
+
+    // The context the activate_glue needs to switch stack.
+    *spp-- = (uintptr_t) spawnee_fn;      // instruction to start at
+    for (size_t j = 0; j < n_callee_saves; ++j) {
+        // callee-saves to carry in when we activate
+        if (j == callee_save_fp)
+            *spp-- = frame_base;
+        else
+            *spp-- = 0;  // fixed: was NULL — these slots are uintptr_t,
+                         // not pointers, and NULL may be nullptr in C++.
+    }
+
+    // Back up one, we overshot where sp should be.
+    rust_sp = (uintptr_t) (spp+1);
+
+    dom->add_task_to_state_vec(&dom->running_tasks, this);
+}
+
+// Replace the task's stack with one at least n_frame_bytes large
+// (doubling and rounding to a power of two), copying the live contents
+// and crudely relocating any stack-internal pointers.
+void
+rust_task::grow(size_t n_frame_bytes)
+{
+    stk_seg *old_stk = this->stk;
+    uintptr_t old_top = (uintptr_t) old_stk->limit;
+    uintptr_t old_bottom = (uintptr_t) &old_stk->data[0];
+    uintptr_t rust_sp_disp = old_top - this->rust_sp;
+    size_t ssz = old_top - old_bottom;
+    dom->log(rust_log::MEM|rust_log::TASK|rust_log::UPCALL,
+             "upcall_grow_task(%" PRIdPTR
+             "), old size %" PRIdPTR
+             " bytes (old lim: 0x%" PRIxPTR ")",
+             n_frame_bytes, ssz, old_top);
+    ssz *= 2;
+    if (ssz < n_frame_bytes)
+        ssz = n_frame_bytes;
+    ssz = next_power_of_two(ssz);
+
+    // Fixed: %d given a pointer / size_t is undefined behavior; use the
+    // inttypes macros with explicit casts, as elsewhere in this file.
+    dom->log(rust_log::MEM|rust_log::TASK, "upcall_grow_task growing stk 0x%"
+             PRIxPTR " to %" PRIdPTR " bytes", (uintptr_t)old_stk, ssz);
+
+    stk_seg *nstk = new_stk(dom, ssz);
+    uintptr_t new_top = (uintptr_t) &nstk->data[ssz];
+    size_t n_copy = old_top - old_bottom;
+    dom->log(rust_log::MEM|rust_log::TASK,
+             "copying %" PRIdPTR " bytes of stack from [0x%" PRIxPTR
+             ", 0x%" PRIxPTR "]"
+             " to [0x%" PRIxPTR ", 0x%" PRIxPTR "]",
+             n_copy,
+             old_bottom, old_bottom + n_copy,
+             new_top - n_copy, new_top);
+
+    VALGRIND_MAKE_MEM_DEFINED((void*)old_bottom, n_copy);
+    memcpy((void*)(new_top - n_copy), (void*)old_bottom, n_copy);
+
+    nstk->limit = new_top;
+    this->stk = nstk;
+    this->rust_sp = new_top - rust_sp_disp;
+
+    dom->log(rust_log::MEM|rust_log::TASK, "processing relocations");
+
+    // FIXME (issue #32): this is the most ridiculously crude
+    // relocation scheme ever. Try actually, you know, writing out
+    // reloc descriptors?
+    size_t n_relocs = 0;
+    for (uintptr_t* p = (uintptr_t*)(new_top - n_copy);
+         p < (uintptr_t*)new_top; ++p) {
+        // Any word that *looks like* a pointer into the old segment is
+        // shifted by the segment displacement.
+        if (old_bottom <= *p && *p < old_top) {
+            n_relocs++;
+            *p += (new_top - old_top);
+        }
+    }
+    dom->log(rust_log::MEM|rust_log::TASK,
+             "processed %" PRIdPTR " relocations", n_relocs);
+    del_stk(dom, old_stk);
+    dom->logptr("grown stk limit", new_top);
+}
+
// Push `value` onto the stack addressed by `sp` (a task stack, not the
// current C stack): swap `sp` into %esp, push, then swap back, returning
// the decremented stack pointer through `sp`. x86-32 only.
// NOTE(review): the "eax" clobber looks vestigial — the asm does not
// obviously touch %eax; confirm before removing.
void
push_onto_thread_stack(uintptr_t &sp, uintptr_t value)
{
    asm("xchgl %0, %%esp\n"
        "push %2\n"
        "xchgl %0, %%esp\n"
        : "=r" (sp)
        : "0" (sp), "r" (value)
        : "eax");
}
+
// Arrange for `glue` to run when the current upcall returns into rust
// code: overwrite the saved return address on the C (runtime) stack with
// `glue`, after stashing the real return address on the rust stack so the
// glue appears to have been called from there. `nargs` is the upcall's
// argument count, needed to locate the saved return address.
void
rust_task::run_after_return(size_t nargs, uintptr_t glue)
{
    // This is only safe to call if we're the currently-running task.
    check_active();

    uintptr_t sp = runtime_sp;

    // The compiler reserves nargs + 1 word for oldsp on the stack and
    // then aligns it.
    sp = align_down(sp - nargs * sizeof(uintptr_t));

    uintptr_t *retpc = ((uintptr_t *) sp) - 1;
    dom->log(rust_log::TASK|rust_log::MEM,
             "run_after_return: overwriting retpc=0x%" PRIxPTR
             " @ runtime_sp=0x%" PRIxPTR
             " with glue=0x%" PRIxPTR,
             *retpc, sp, glue);

    // Move the current return address (which points into rust code)
    // onto the rust stack and pretend we just called into the glue.
    push_onto_thread_stack(rust_sp, *retpc);
    *retpc = glue;
}
+
// Arrange for `glue` to run when this (suspended) task is next resumed,
// by overwriting the resume address saved above the callee-save block on
// the task's own stack.
void
rust_task::run_on_resume(uintptr_t glue)
{
    // This is only safe to call if we're suspended.
    check_suspended();

    // Inject glue as resume address in the suspended frame.
    uintptr_t* rsp = (uintptr_t*) rust_sp;
    rsp += n_callee_saves;
    dom->log(rust_log::TASK|rust_log::MEM,
             "run_on_resume: overwriting retpc=0x%" PRIxPTR
             " @ rust_sp=0x%" PRIxPTR
             " with glue=0x%" PRIxPTR,
             *rsp, rsp, glue);
    *rsp = glue;
}
+
// Hand the CPU back to the scheduler: schedule the crate's yield-glue to
// run when the current upcall (with `nargs` arguments) returns.
void
rust_task::yield(size_t nargs)
{
    dom->log(rust_log::TASK,
             "task 0x%" PRIxPTR " yielding", this);
    run_after_return(nargs, dom->root_crate->get_yield_glue());
}
+
+static inline uintptr_t
+get_callee_save_fp(uintptr_t *top_of_callee_saves)
+{
+ return top_of_callee_saves[n_callee_saves - (callee_save_fp + 1)];
+}
+
// Force-fail another task: unblock it so it can unwind, and make it run
// the unwind glue when it is next resumed.
void
rust_task::kill() {
    // Note the distinction here: kill() is when you're in an upcall
    // from task A and want to force-fail task B, you do B->kill().
    // If you want to fail yourself you do self->fail(upcall_nargs).
    dom->log(rust_log::TASK, "killing task 0x%" PRIxPTR, this);
    // Unblock the task so it can unwind.
    unblock();
    // Killing the root task takes the whole domain down with it.
    if (this == dom->root_task)
        dom->fail();
    run_on_resume(dom->root_crate->get_unwind_glue());
}

// Fail the *current* task (from inside one of its own upcalls, with
// `nargs` upcall arguments): start unwinding on return and propagate the
// failure to the spawning task, if any.
void
rust_task::fail(size_t nargs) {
    // See note in ::kill() regarding who should call this.
    dom->log(rust_log::TASK, "task 0x%" PRIxPTR " failing", this);
    // Unblock the task so it can unwind.
    unblock();
    if (this == dom->root_task)
        dom->fail();
    run_after_return(nargs, dom->root_crate->get_unwind_glue());
    if (spawner) {
        dom->log(rust_log::TASK,
                 "task 0x%" PRIxPTR
                 " propagating failure to parent 0x%" PRIxPTR,
                 this, spawner);
        spawner->kill();
    }
}
+
+void
+rust_task::notify_waiting_tasks()
+{
+ while (waiting_tasks.length() > 0) {
+ rust_task *t = waiting_tasks.pop()->receiver;
+ if (!t->dead())
+ t->wakeup(this);
+ }
+}
+
// Return the saved frame pointer of this (suspended) task.
uintptr_t
rust_task::get_fp() {
    // sp in any suspended task points to the last callee-saved reg on
    // the task stack.
    return get_callee_save_fp((uintptr_t*)rust_sp);
}

// Walk one frame up the chain: fp itself addresses a callee-save block.
uintptr_t
rust_task::get_previous_fp(uintptr_t fp) {
    // fp happens to, coincidentally (!) also point to the last
    // callee-save on the task stack.
    return get_callee_save_fp((uintptr_t*)fp);
}

// The word immediately below fp holds the frame's glue-function table.
frame_glue_fns*
rust_task::get_frame_glue_fns(uintptr_t fp) {
    fp -= sizeof(uintptr_t);
    return *((frame_glue_fns**) fp);
}
+
// State predicates: a task's state is identified by which of the domain's
// state vectors it currently lives in.

bool
rust_task::running()
{
    return state == &dom->running_tasks;
}

bool
rust_task::blocked()
{
    return state == &dom->blocked_tasks;
}

// True iff blocked and waiting specifically on condition `on`.
bool
rust_task::blocked_on(rust_cond *on)
{
    return blocked() && cond == on;
}

bool
rust_task::dead()
{
    return state == &dom->dead_tasks;
}
+
// Move this task from state vector `src` (which it must currently be in)
// to `dst`, keeping the `state` pointer in sync.
void
rust_task::transition(ptr_vec<rust_task> *src, ptr_vec<rust_task> *dst)
{
    I(dom, state == src);
    dom->log(rust_log::TASK,
             "task 0x%" PRIxPTR " state change '%s' -> '%s'",
             (uintptr_t)this,
             dom->state_vec_name(src),
             dom->state_vec_name(dst));
    dom->remove_task_from_state_vec(src, this);
    dom->add_task_to_state_vec(dst, this);
    state = dst;
}
+
// Move to the blocked list, recording the condition we are waiting on.
void
rust_task::block(rust_cond *on)
{
    I(dom, on);
    transition(&dom->running_tasks, &dom->blocked_tasks);
    dom->log(rust_log::TASK,
             "task 0x%" PRIxPTR " blocking on 0x%" PRIxPTR,
             (uintptr_t)this,
             (uintptr_t)on);
    cond = on;
}

// Return to the running list; asserts that we are being woken by the same
// condition we blocked on.
void
rust_task::wakeup(rust_cond *from)
{
    transition(&dom->blocked_tasks, &dom->running_tasks);
    I(dom, cond == from);
}

// Move from running to the dead list.
void
rust_task::die()
{
    transition(&dom->running_tasks, &dom->dead_tasks);
}

// Wake the task if it is blocked; no-op otherwise.
void
rust_task::unblock()
{
    if (blocked())
        wakeup(cond);
}
+
// Return this task's crate cache for `curr_crate`, dropping any cached
// entry that belongs to a different crate and fetching a fresh one from
// the domain when needed.
rust_crate_cache *
rust_task::get_crate_cache(rust_crate const *curr_crate)
{
    if (cache && cache->crate != curr_crate) {
        dom->log(rust_log::TASK, "switching task crate-cache to crate 0x%"
                 PRIxPTR, curr_crate);
        cache->deref();
        cache = NULL;
    }

    if (!cache) {
        dom->log(rust_log::TASK, "fetching cache for current crate");
        cache = dom->get_cache(curr_crate);
    }
    return cache;
}
+
+//
+// Local Variables:
+// mode: C++
+// fill-column: 78;
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// compile-command: "make -k -C .. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
+// End:
+//
diff --git a/src/rt/rust_timer.cpp b/src/rt/rust_timer.cpp
new file mode 100644
index 00000000..897b7730
--- /dev/null
+++ b/src/rt/rust_timer.cpp
@@ -0,0 +1,97 @@
+
+#include "rust_internal.h"
+
+// The mechanism in this file is very crude; every domain (thread) spawns its
+// own secondary timer thread, and that timer thread *never idles*. It
+// sleep-loops interrupting the domain.
+//
+// This will need replacement, particularly in order to achieve an actual
+// state of idling when we're waiting on the outside world. Though that might
+// be as simple as making a secondary waitable start/stop-timer signalling
+// system between the domain and its timer thread. We'll see.
+//
+// On the other hand, we don't presently have the ability to idle domains *at
+// all*, and without the timer thread we're unable to otherwise preempt rust
+// tasks. So ... one step at a time.
+//
+// The implementation here is "lockless" in the sense that it only involves
+// one-directional signaling of one-shot events, so the event initiator just
+// writes a nonzero word to a predetermined location and waits for the
+// receiver to see it show up in their memory.
+
+#if defined(__WIN32__)
+static DWORD WINAPI
+win32_timer_loop(void *ptr)
+{
+ // We were handed the rust_timer that owns us.
+ rust_timer *timer = (rust_timer *)ptr;
+ rust_dom &dom = timer->dom;
+ dom.log(LOG_TIMER, "in timer 0x%" PRIxPTR, (uintptr_t)timer);
+ while (!timer->exit_flag) {
+ Sleep(TIME_SLICE_IN_MS);
+ dom.log(LOG_TIMER,
+ "timer 0x%" PRIxPTR
+ " interrupting domain 0x%" PRIxPTR,
+ (uintptr_t)timer,
+ (uintptr_t)&dom);
+ dom.interrupt_flag = 1;
+ }
+ ExitThread(0);
+ return 0;
+}
+
+#elif defined(__GNUC__)
// POSIX timer thread body: sleep-loop setting the owning domain's
// interrupt flag (used to preempt rust tasks) until exit_flag is raised.
static void *
pthread_timer_loop(void *ptr)
{
    // We were handed the rust_timer that owns us.
    rust_timer *timer = (rust_timer *)ptr;
    rust_dom &dom(timer->dom);
    while (!timer->exit_flag) {
        usleep(TIME_SLICE_IN_MS * 1000);
        dom.interrupt_flag = 1;
    }
    pthread_exit(NULL);
    return 0;

}
+#else
+#error "Platform not supported"
+#endif
+
+
+rust_timer::rust_timer(rust_dom &dom) : dom(dom), exit_flag(0)
+{
+ dom.log(rust_log::TIMER, "creating timer for domain 0x%" PRIxPTR, &dom);
+#if defined(__WIN32__)
+ thread = CreateThread(NULL, 0, win32_timer_loop, this, 0, NULL);
+ dom.win32_require("CreateThread", thread != NULL);
+#else
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+ pthread_create(&thread, &attr, pthread_timer_loop, (void *)this);
+#endif
+}
+
// Signal the timer thread to stop (one-shot flag write, see file header)
// and wait for it to exit.
rust_timer::~rust_timer()
{
    exit_flag = 1;
#if defined(__WIN32__)
    dom.win32_require("WaitForSingleObject",
                      WaitForSingleObject(thread, INFINITE)
                      == WAIT_OBJECT_0);
#else
    pthread_join(thread, NULL);
#endif
}
+
+//
+// Local Variables:
+// mode: C++
+// fill-column: 78;
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// compile-command: "make -k -C .. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
+// End:
+//
diff --git a/src/rt/rust_upcall.cpp b/src/rt/rust_upcall.cpp
new file mode 100644
index 00000000..3a17ea1c
--- /dev/null
+++ b/src/rt/rust_upcall.cpp
@@ -0,0 +1,654 @@
+
+#include "rust_internal.h"
+
+
+// Upcalls.
+
// Log entry into an upcall and indent subsequent log lines. The GNUC
// variant also records the caller's return address.
#ifdef __GNUC__
#define LOG_UPCALL_ENTRY(task)                              \
    (task)->dom->get_log().reset_indent(0);                 \
    (task)->dom->log(rust_log::UPCALL,                      \
                     "upcall task: 0x%" PRIxPTR             \
                     " retpc: 0x%" PRIxPTR,                 \
                     (task), __builtin_return_address(0));  \
    (task)->dom->get_log().indent();
#else
// FIX: the non-GNUC variant was missing the comma between the format
// string and (task), which would not compile.
#define LOG_UPCALL_ENTRY(task)                              \
    (task)->dom->get_log().reset_indent(0);                 \
    (task)->dom->log(rust_log::UPCALL,                      \
                     "upcall task: 0x%" PRIxPTR,            \
                     (task));                               \
    (task)->dom->get_log().indent();
#endif
+
+extern "C" CDECL char const *str_buf(rust_task *task, rust_str *s);
+
// Upcall: grow the calling task's stack (see rust_task::grow).
extern "C" void
upcall_grow_task(rust_task *task, size_t n_frame_bytes)
{
    LOG_UPCALL_ENTRY(task);
    task->grow(n_frame_bytes);
}

// Upcall: log an integer at user-log level, in hex, decimal and as a char.
extern "C" CDECL void
upcall_log_int(rust_task *task, int32_t i)
{
    LOG_UPCALL_ENTRY(task);
    task->dom->log(rust_log::UPCALL|rust_log::ULOG,
                   "upcall log_int(0x%" PRIx32 " = %" PRId32 " = '%c')",
                   i, i, (char)i);
}

// Upcall: log a rust string at user-log level.
extern "C" CDECL void
upcall_log_str(rust_task *task, rust_str *str)
{
    LOG_UPCALL_ENTRY(task);
    const char *c = str_buf(task, str);
    task->dom->log(rust_log::UPCALL|rust_log::ULOG,
                   "upcall log_str(\"%s\")",
                   c);
}
+
+extern "C" CDECL void
+upcall_trace_word(rust_task *task, uintptr_t i)
+{
+ LOG_UPCALL_ENTRY(task);
+ task->dom->log(rust_log::UPCALL|rust_log::TRACE,
+ "trace: 0x%" PRIxPTR "",
+ i, i, (char)i);
+}
+
// Upcall: trace a C string at trace log level.
extern "C" CDECL void
upcall_trace_str(rust_task *task, char const *c)
{
    LOG_UPCALL_ENTRY(task);
    task->dom->log(rust_log::UPCALL|rust_log::TRACE,
                   "trace: %s",
                   c);
}
+
// Upcall: allocate a new port for `task` carrying units of `unit_sz` bytes.
extern "C" CDECL rust_port*
upcall_new_port(rust_task *task, size_t unit_sz)
{
    LOG_UPCALL_ENTRY(task);
    rust_dom *dom = task->dom;
    dom->log(rust_log::UPCALL|rust_log::MEM|rust_log::COMM,
             "upcall_new_port(task=0x%" PRIxPTR ", unit_sz=%d)",
             (uintptr_t)task, unit_sz);
    return new (dom) rust_port(task, unit_sz);
}

// Upcall: destroy a port whose refcount has reached zero.
extern "C" CDECL void
upcall_del_port(rust_task *task, rust_port *port)
{
    LOG_UPCALL_ENTRY(task);
    task->dom->log(rust_log::UPCALL|rust_log::MEM|rust_log::COMM,
                   "upcall del_port(0x%" PRIxPTR ")", (uintptr_t)port);
    I(task->dom, !port->refcnt);
    delete port;
}

// Upcall: allocate a new channel from `task` to `port`.
extern "C" CDECL rust_chan*
upcall_new_chan(rust_task *task, rust_port *port)
{
    LOG_UPCALL_ENTRY(task);
    rust_dom *dom = task->dom;
    dom->log(rust_log::UPCALL|rust_log::MEM|rust_log::COMM,
             "upcall_new_chan(task=0x%" PRIxPTR ", port=0x%" PRIxPTR ")",
             (uintptr_t)task, port);
    I(dom, port);
    return new (dom) rust_chan(task, port);
}

// Upcall: destroy a channel whose refcount has reached zero.
extern "C" CDECL void
upcall_del_chan(rust_task *task, rust_chan *chan)
{
    LOG_UPCALL_ENTRY(task);
    rust_dom *dom = task->dom;
    dom->log(rust_log::UPCALL|rust_log::MEM|rust_log::COMM,
             "upcall del_chan(0x%" PRIxPTR ")", (uintptr_t)chan);
    I(dom, !chan->refcnt);
    delete chan;
}

// Upcall: make a copy of `chan` owned by `owner` (allocated in the
// owner's domain), pointing at the same port.
extern "C" CDECL rust_chan *
upcall_clone_chan(rust_task *task, rust_task *owner, rust_chan *chan)
{
    LOG_UPCALL_ENTRY(task);
    rust_dom *dom = task->dom;
    dom->log(rust_log::UPCALL|rust_log::MEM|rust_log::COMM,
             "upcall clone_chan(owner 0x%" PRIxPTR ", chan 0x%" PRIxPTR ")",
             (uintptr_t)owner, (uintptr_t)chan);
    return new (owner->dom) rust_chan(owner, chan->port);
}
+
+
+/*
+ * Buffering protocol:
+ *
+ * - Reader attempts to read:
+ * - Set reader to blocked-reading state.
+ * - If buf with data exists:
+ * - Attempt transmission.
+ *
+ * - Writer attempts to write:
+ * - Set writer to blocked-writing state.
+ * - Copy data into chan.
+ * - Attempt transmission.
+ *
+ * - Transmission:
+ * - Copy data from buf to reader
+ * - Decr buf
+ * - Set reader to running
+ * - If buf now empty and blocked writer:
+ * - Set blocked writer to running
+ *
+ */
+
// Try to move one unit from channel `src`'s buffer into task `dst`'s
// receive pointer (see the buffering-protocol comment above). Returns 1
// on a completed transfer, 0 if any precondition fails (dead source,
// empty buffer, or receiver not blocked reading this port).
static int
attempt_transmission(rust_dom *dom,
                     rust_chan *src,
                     rust_task *dst)
{
    I(dom, src);
    I(dom, dst);

    rust_port *port = src->port;
    if (!port) {
        dom->log(rust_log::COMM,
                 "src died, transmission incomplete");
        return 0;
    }

    circ_buf *buf = &src->buffer;
    if (buf->unread == 0) {
        dom->log(rust_log::COMM,
                 "buffer empty, transmission incomplete");
        return 0;
    }

    if (!dst->blocked_on(port)) {
        dom->log(rust_log::COMM,
                 "dst in non-reading state, transmission incomplete");
        return 0;
    }

    // Copy one unit out of the channel's buffer into the receiver.
    uintptr_t *dptr = dst->dptr;
    dom->log(rust_log::COMM,
             "receiving %d bytes into dst_task=0x%" PRIxPTR
             ", dptr=0x%" PRIxPTR,
             port->unit_sz, dst, dptr);
    buf->shift(dptr);

    // Wake up the sender if its waiting for the send operation.
    rust_task *sender = src->task;
    rust_token *token = &src->token;
    if (sender->blocked_on(token))
        sender->wakeup(token);

    // Wake up the receiver, there is new data.
    dst->wakeup(port);

    dom->log(rust_log::COMM, "transmission complete");
    return 1;
}
+
// Upcall: voluntarily yield the CPU back to the scheduler.
extern "C" CDECL void
upcall_yield(rust_task *task)
{
    LOG_UPCALL_ENTRY(task);
    rust_dom *dom = task->dom;
    dom->log(rust_log::UPCALL|rust_log::COMM, "upcall yield()");
    task->yield(1);
}

// Upcall: block the calling task until `other` dies.
extern "C" CDECL void
upcall_join(rust_task *task, rust_task *other)
{
    LOG_UPCALL_ENTRY(task);
    rust_dom *dom = task->dom;
    dom->log(rust_log::UPCALL|rust_log::COMM,
             "upcall join(other=0x%" PRIxPTR ")",
             (uintptr_t)other);

    // If the other task is already dying, we don't have to wait for it.
    if (!other->dead()) {
        other->waiting_tasks.push(&task->alarm);
        task->block(other);
        task->yield(2);
    }
}
+
+extern "C" CDECL void
+upcall_send(rust_task *task, rust_chan *chan, void *sptr)
+{
+ LOG_UPCALL_ENTRY(task);
+ rust_dom *dom = task->dom;
+ dom->log(rust_log::UPCALL|rust_log::COMM,
+ "upcall send(chan=0x%" PRIxPTR ", sptr=0x%" PRIxPTR ")",
+ (uintptr_t)chan,
+ (uintptr_t)sptr);
+
+ I(dom, chan);
+ I(dom, sptr);
+
+ rust_port *port = chan->port;
+ dom->log(rust_log::MEM|rust_log::COMM,
+ "send to port", (uintptr_t)port);
+ I(dom, port);
+
+ rust_token *token = &chan->token;
+ dom->log(rust_log::MEM|rust_log::COMM,
+ "sending via token 0x%" PRIxPTR,
+ (uintptr_t)token);
+
+ if (port->task) {
+ chan->buffer.push(sptr);
+ task->block(token);
+ attempt_transmission(dom, chan, port->task);
+ if (chan->buffer.unread && !token->pending())
+ token->submit();
+ } else {
+ dom->log(rust_log::COMM|rust_log::ERR,
+ "port has no task (possibly throw?)");
+ }
+
+ if (!task->running())
+ task->yield(3);
+}
+
+extern "C" CDECL void
+upcall_recv(rust_task *task, uintptr_t *dptr, rust_port *port)
+{
+ LOG_UPCALL_ENTRY(task);
+ rust_dom *dom = task->dom;
+ dom->log(rust_log::UPCALL|rust_log::COMM,
+ "upcall recv(dptr=0x" PRIxPTR ", port=0x%" PRIxPTR ")",
+ (uintptr_t)dptr,
+ (uintptr_t)port);
+
+ I(dom, port);
+ I(dom, port->task);
+ I(dom, task);
+ I(dom, port->task == task);
+
+ task->block(port);
+
+ if (port->writers.length() > 0) {
+ I(dom, task->dom);
+ size_t i = rand(&dom->rctx);
+ i %= port->writers.length();
+ rust_token *token = port->writers[i];
+ rust_chan *chan = token->chan;
+ if (attempt_transmission(dom, chan, task))
+ token->withdraw();
+ } else {
+ dom->log(rust_log::COMM,
+ "no writers sending to port", (uintptr_t)port);
+ }
+
+ if (!task->running()) {
+ task->dptr = dptr;
+ task->yield(3);
+ }
+}
+
// Upcall: the current task failed an assertion at file:line; unwind it.
extern "C" CDECL void
upcall_fail(rust_task *task, char const *expr, char const *file, size_t line)
{
    LOG_UPCALL_ENTRY(task);
    task->dom->log(rust_log::UPCALL|rust_log::ERR,
                   "upcall fail '%s', %s:%" PRIdPTR,
                   expr, file, line);
    task->fail(4);
}

// Upcall: force-fail another task (see rust_task::kill).
extern "C" CDECL void
upcall_kill(rust_task *task, rust_task *target)
{
    LOG_UPCALL_ENTRY(task);
    task->dom->log(rust_log::UPCALL|rust_log::TASK,
                   "upcall kill target=0x%" PRIxPTR, target);
    target->kill();
}

// Upcall: the current task finished normally; mark it dead, wake any
// joiners, and reschedule.
extern "C" CDECL void
upcall_exit(rust_task *task)
{
    LOG_UPCALL_ENTRY(task);

    rust_dom *dom = task->dom;
    dom->log(rust_log::UPCALL|rust_log::TASK, "upcall exit");
    task->die();
    task->notify_waiting_tasks();
    task->yield(1);
}

// Upcall: allocate nbytes from the domain heap.
extern "C" CDECL uintptr_t
upcall_malloc(rust_task *task, size_t nbytes)
{
    LOG_UPCALL_ENTRY(task);

    void *p = task->dom->malloc(nbytes);
    task->dom->log(rust_log::UPCALL|rust_log::MEM,
                   "upcall malloc(%u) = 0x%" PRIxPTR,
                   nbytes, (uintptr_t)p);
    return (uintptr_t) p;
}

// Upcall: return memory to the domain heap.
extern "C" CDECL void
upcall_free(rust_task *task, void* ptr)
{
    LOG_UPCALL_ENTRY(task);

    rust_dom *dom = task->dom;
    dom->log(rust_log::UPCALL|rust_log::MEM,
             "upcall free(0x%" PRIxPTR ")",
             (uintptr_t)ptr);
    dom->free(ptr);
}
+
// Upcall: allocate a rust string holding `fill` bytes copied from `s`.
// On allocation failure the task is failed and NULL returned.
extern "C" CDECL rust_str *
upcall_new_str(rust_task *task, char const *s, size_t fill)
{
    LOG_UPCALL_ENTRY(task);
    rust_dom *dom = task->dom;
    dom->log(rust_log::UPCALL|rust_log::MEM,
             "upcall new_str('%s', %" PRIdPTR ")", s, fill);
    // Over-allocate to a power of two so in-place growth is usually
    // possible later (cf. upcall_vec_grow).
    size_t alloc = next_power_of_two(sizeof(rust_str) + fill);
    void *mem = dom->malloc(alloc);
    if (!mem) {
        task->fail(3);
        return NULL;
    }
    rust_str *st = new (mem) rust_str(dom, alloc, fill, (uint8_t const *)s);
    dom->log(rust_log::UPCALL|rust_log::MEM,
             "upcall new_str('%s', %" PRIdPTR ") = 0x%" PRIxPTR,
             s, fill, st);
    return st;
}

// Upcall: allocate an empty rust vector with capacity for `fill` bytes.
// Note the vec is constructed with fill == 0; `fill` only sizes the
// allocation. On allocation failure the task is failed and NULL returned.
extern "C" CDECL rust_vec *
upcall_new_vec(rust_task *task, size_t fill)
{
    LOG_UPCALL_ENTRY(task);
    rust_dom *dom = task->dom;
    dom->log(rust_log::UPCALL|rust_log::MEM,
             "upcall new_vec(%" PRIdPTR ")", fill);
    size_t alloc = next_power_of_two(sizeof(rust_vec) + fill);
    void *mem = dom->malloc(alloc);
    if (!mem) {
        task->fail(3);
        return NULL;
    }
    rust_vec *v = new (mem) rust_vec(dom, alloc, 0, NULL);
    dom->log(rust_log::UPCALL|rust_log::MEM,
             "upcall new_vec(%" PRIdPTR ") = 0x%" PRIxPTR,
             fill, v);
    return v;
}
+
+
+extern "C" CDECL rust_str *
+upcall_vec_grow(rust_task *task, rust_vec *v, size_t n_bytes)
+{
+ LOG_UPCALL_ENTRY(task);
+ rust_dom *dom = task->dom;
+ dom->log(rust_log::UPCALL|rust_log::MEM,
+ "upcall vec_grow(%" PRIxPTR ", %" PRIdPTR ")", v, n_bytes);
+ size_t alloc = next_power_of_two(sizeof(rust_vec) + v->fill + n_bytes);
+ if (v->refcnt == 1) {
+
+ // Fastest path: already large enough.
+ if (v->alloc >= alloc) {
+ dom->log(rust_log::UPCALL|rust_log::MEM, "no-growth path");
+ return v;
+ }
+
+ // Second-fastest path: can at least realloc.
+ dom->log(rust_log::UPCALL|rust_log::MEM, "realloc path");
+ v = (rust_vec*)dom->realloc(v, alloc);
+ if (!v) {
+ task->fail(3);
+ return NULL;
+ }
+ v->alloc = alloc;
+
+ } else {
+ // Slowest path: make a new vec.
+ dom->log(rust_log::UPCALL|rust_log::MEM, "new vec path");
+ void *mem = dom->malloc(alloc);
+ if (!mem) {
+ task->fail(3);
+ return NULL;
+ }
+ v->deref();
+ v = new (mem) rust_vec(dom, alloc, v->fill, &v->data[0]);
+ }
+ I(dom, sizeof(rust_vec) + v->fill <= v->alloc);
+ return v;
+}
+
+
// Resolve (and cache) C symbol `symbol` from `library`, using the task's
// crate cache slots `lib_num` / `c_sym_num`.
static rust_crate_cache::c_sym *
fetch_c_sym(rust_task *task,
            rust_crate const *curr_crate,
            size_t lib_num,
            size_t c_sym_num,
            char const *library,
            char const *symbol)
{
    rust_crate_cache *cache = task->get_crate_cache(curr_crate);
    rust_crate_cache::lib *l = cache->get_lib(lib_num, library);
    return cache->get_c_sym(c_sym_num, l, symbol);
}
+
// Upcall: resolve a rust symbol (named by `path`, crate-relative) inside
// another crate's "rust_crate" entry in `library`, caching the result.
// Fails the task if resolution fails; returns the symbol address (or 0).
extern "C" CDECL uintptr_t
upcall_require_rust_sym(rust_task *task,
                        rust_crate const *curr_crate,
                        size_t lib_num, // # of lib
                        size_t c_sym_num, // # of C sym "rust_crate" in lib
                        size_t rust_sym_num, // # of rust sym
                        char const *library,
                        char const **path)
{
    LOG_UPCALL_ENTRY(task);
    rust_dom *dom = task->dom;

    dom->log(rust_log::UPCALL|rust_log::CACHE,
             "upcall require rust sym: lib #%" PRIdPTR
             " = %s, c_sym #%" PRIdPTR
             ", rust_sym #%" PRIdPTR
             ", curr_crate = 0x%" PRIxPTR,
             lib_num, library, c_sym_num, rust_sym_num,
             curr_crate);
    for (char const **c = crate_rel(curr_crate, path); *c; ++c) {
        dom->log(rust_log::UPCALL, " + %s", crate_rel(curr_crate, *c));
    }

    dom->log(rust_log::UPCALL|rust_log::CACHE,
             "require C symbol 'rust_crate' from lib #%" PRIdPTR,lib_num);
    rust_crate_cache::c_sym *c =
        fetch_c_sym(task, curr_crate, lib_num, c_sym_num,
                    library, "rust_crate");

    dom->log(rust_log::UPCALL|rust_log::CACHE,
             "require rust symbol inside crate");
    rust_crate_cache::rust_sym *s =
        task->cache->get_rust_sym(rust_sym_num, dom, curr_crate, c, path);

    uintptr_t addr = s->get_val();
    if (addr) {
        dom->log(rust_log::UPCALL|rust_log::CACHE,
                 "found-or-cached addr: 0x%" PRIxPTR, addr);
    } else {
        dom->log(rust_log::UPCALL|rust_log::CACHE,
                 "failed to resolve symbol");
        task->fail(7);
    }
    return addr;
}

// Upcall: resolve a C symbol from `library`, caching the result. Fails
// the task if resolution fails; returns the symbol address (or 0).
extern "C" CDECL uintptr_t
upcall_require_c_sym(rust_task *task,
                     rust_crate const *curr_crate,
                     size_t lib_num, // # of lib
                     size_t c_sym_num, // # of C sym
                     char const *library,
                     char const *symbol)
{
    LOG_UPCALL_ENTRY(task);
    rust_dom *dom = task->dom;

    dom->log(rust_log::UPCALL|rust_log::CACHE,
             "upcall require c sym: lib #%" PRIdPTR
             " = %s, c_sym #%" PRIdPTR
             " = %s"
             ", curr_crate = 0x%" PRIxPTR,
             lib_num, library, c_sym_num, symbol, curr_crate);

    rust_crate_cache::c_sym *c =
        fetch_c_sym(task, curr_crate, lib_num, c_sym_num, library, symbol);

    uintptr_t addr = c->get_val();
    if (addr) {
        dom->log(rust_log::UPCALL|rust_log::CACHE,
                 "found-or-cached addr: 0x%" PRIxPTR, addr);
    } else {
        dom->log(rust_log::UPCALL|rust_log::CACHE,
                 "failed to resolve symbol");
        task->fail(6);
    }
    return addr;
}

// Upcall: fetch (or build and cache) a type descriptor for the given
// size/align and component descriptors.
extern "C" CDECL type_desc *
upcall_get_type_desc(rust_task *task,
                     rust_crate const *curr_crate,
                     size_t size,
                     size_t align,
                     size_t n_descs,
                     type_desc const **descs)
{
    LOG_UPCALL_ENTRY(task);
    rust_dom *dom = task->dom;
    dom->log(rust_log::UPCALL|rust_log::CACHE,
             "upcall get_type_desc with size=%" PRIdPTR
             ", align=%" PRIdPTR ", %" PRIdPTR " descs",
             size, align, n_descs);
    rust_crate_cache *cache = task->get_crate_cache(curr_crate);
    type_desc *td = cache->get_type_desc(size, align, n_descs, descs);
    dom->log(rust_log::UPCALL|rust_log::CACHE,
             "returning tydesc 0x%" PRIxPTR, td);
    return td;
}
+
+
// OS-thread entry point for a new domain: run its main loop, then tear
// the domain (and its service object) down when the loop exits.
#if defined(__WIN32__)
static DWORD WINAPI rust_thread_start(void *ptr)
#elif defined(__GNUC__)
static void *rust_thread_start(void *ptr)
#else
#error "Platform not supported"
#endif
{
    // We were handed the domain we are supposed to run.
    rust_dom *dom = (rust_dom *)ptr;

    // Start a new rust main loop for this thread.
    rust_main_loop(dom);

    rust_srv *srv = dom->srv;
    delete dom;
    delete srv;

    return 0;
}
+
// Upcall: allocate a new (not yet started) task in the spawner's domain.
extern "C" CDECL rust_task *
upcall_new_task(rust_task *spawner)
{
    LOG_UPCALL_ENTRY(spawner);

    rust_dom *dom = spawner->dom;
    rust_task *task = new (dom) rust_task(dom, spawner);
    dom->log(rust_log::UPCALL|rust_log::MEM|rust_log::TASK,
             "upcall new_task(spawner 0x%" PRIxPTR ") = 0x%" PRIxPTR,
             spawner, task);
    return task;
}

// Upcall: start a previously-allocated task running `spawnee_fn`, with
// `callsz` bytes of call frame copied from the spawner's stack, exiting
// via `exit_task_glue`.
extern "C" CDECL rust_task *
upcall_start_task(rust_task *spawner,
                  rust_task *task,
                  uintptr_t exit_task_glue,
                  uintptr_t spawnee_fn,
                  size_t callsz)
{
    LOG_UPCALL_ENTRY(spawner);

    rust_dom *dom = spawner->dom;
    dom->log(rust_log::UPCALL|rust_log::MEM|rust_log::TASK,
             "upcall start_task(task 0x%" PRIxPTR
             " exit_task_glue 0x%" PRIxPTR
             ", spawnee 0x%" PRIxPTR
             ", callsz %" PRIdPTR ")",
             task, exit_task_glue, spawnee_fn, callsz);
    task->start(exit_task_glue, spawnee_fn, spawner->rust_sp, callsz);
    return task;
}

// Upcall: create a fresh domain (with its own cloned service object) and
// return its root task; the domain's thread is started separately by
// upcall_start_thread.
extern "C" CDECL rust_task *
upcall_new_thread(rust_task *task)
{
    LOG_UPCALL_ENTRY(task);

    rust_dom *old_dom = task->dom;
    rust_dom *new_dom = new rust_dom(old_dom->srv->clone(),
                                     old_dom->root_crate);
    new_dom->log(rust_log::UPCALL|rust_log::MEM,
                 "upcall new_thread() = 0x%" PRIxPTR,
                 new_dom->root_task);
    return new_dom->root_task;
}
+
// Upcall: start `root_task` (the root task of a domain created by
// upcall_new_thread) on a brand-new OS thread.
// NOTE(review): always returns 0, not root_task, despite the rust_task*
// return type — and the pthread_create result is unchecked. Confirm
// callers ignore the return value.
extern "C" CDECL rust_task *
upcall_start_thread(rust_task *spawner,
                    rust_task *root_task,
                    uintptr_t exit_task_glue,
                    uintptr_t spawnee_fn,
                    size_t callsz)
{
    LOG_UPCALL_ENTRY(spawner);

    rust_dom *dom = spawner->dom;
    dom->log(rust_log::UPCALL|rust_log::MEM|rust_log::TASK,
             "upcall start_thread(exit_task_glue 0x%" PRIxPTR
             ", spawnee 0x%" PRIxPTR
             ", callsz %" PRIdPTR ")",
             exit_task_glue, spawnee_fn, callsz);
    root_task->start(exit_task_glue, spawnee_fn, spawner->rust_sp, callsz);

#if defined(__WIN32__)
    HANDLE thread;
    thread = CreateThread(NULL, 0, rust_thread_start, root_task->dom,
                          0, NULL);
    dom->win32_require("CreateThread", thread != NULL);
#else
    pthread_t thread;
    pthread_create(&thread, &dom->attr, rust_thread_start,
                   (void *)root_task->dom);
#endif

    return 0;
}
+
+//
+// Local Variables:
+// mode: C++
+// fill-column: 78;
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// compile-command: "make -k -C .. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
+// End:
+//
diff --git a/src/rt/rust_util.h b/src/rt/rust_util.h
new file mode 100644
index 00000000..6f34dad9
--- /dev/null
+++ b/src/rt/rust_util.h
@@ -0,0 +1,155 @@
+#ifndef RUST_UTIL_H
+#define RUST_UTIL_H
+
+// Reference counted objects
+
// Reference-counted base: objects are born with refcount 1, owned by
// their creator.
template <typename T>
rc_base<T>::rc_base() :
    refcnt(1)
{
}

template <typename T>
rc_base<T>::~rc_base()
{
}
+
+// Utility type: pointer-vector.
+
// Growable vector of pointers, allocated from a domain's heap. Elements
// must carry an `idx` field, kept in sync so swapdel() is O(1).
template <typename T>
ptr_vec<T>::ptr_vec(rust_dom *dom) :
    dom(dom),
    alloc(INIT_SIZE),
    fill(0),
    data(new (dom) T*[alloc])
{
    I(dom, data);
    dom->log(rust_log::MEM,
             "new ptr_vec(data=0x%" PRIxPTR ") -> 0x%" PRIxPTR,
             (uintptr_t)data, (uintptr_t)this);
}

// The vector must be emptied by its owner before destruction.
template <typename T>
ptr_vec<T>::~ptr_vec()
{
    I(dom, data);
    dom->log(rust_log::MEM,
             "~ptr_vec 0x%" PRIxPTR ", data=0x%" PRIxPTR,
             (uintptr_t)this, (uintptr_t)data);
    I(dom, fill == 0);
    dom->free(data);
}

// Checked access: asserts the element's back-pointer (idx) is consistent.
template <typename T> T *&
ptr_vec<T>::operator[](size_t offset) {
    I(dom, data[offset]->idx == offset);
    return data[offset];
}
+
// Append `p`, doubling the backing array when full, and record the
// element's position in p->idx.
// NOTE(review): on realloc failure `data` is overwritten with NULL before
// the I() check fires — acceptable only if I() aborts; confirm.
template <typename T>
void
ptr_vec<T>::push(T *p)
{
    I(dom, data);
    I(dom, fill <= alloc);
    if (fill == alloc) {
        alloc *= 2;
        data = (T **)dom->realloc(data, alloc * sizeof(T*));
        I(dom, data);
    }
    I(dom, fill < alloc);
    p->idx = fill;
    data[fill++] = p;
}

// Remove and return the last element. Caller must ensure non-empty.
template <typename T>
T *
ptr_vec<T>::pop()
{
    return data[--fill];
}

// Shrink the backing array (halving it once) when occupancy `sz` has
// dropped to a quarter of capacity, never below INIT_SIZE.
template <typename T>
void
ptr_vec<T>::trim(size_t sz)
{
    I(dom, data);
    if (sz <= (alloc / 4) &&
        (alloc / 2) >= INIT_SIZE) {
        alloc /= 2;
        I(dom, alloc >= fill);
        data = (T **)dom->realloc(data, alloc * sizeof(T*));
        I(dom, data);
    }
}
+
// O(1) unordered delete: overwrite `item`'s slot with the last element
// (fixing that element's idx) and shrink fill by one.
template <typename T>
void
ptr_vec<T>::swapdel(T *item)
{
    /* Swap the endpoint into i and decr fill. */
    I(dom, data);
    I(dom, fill > 0);
    I(dom, item->idx < fill);
    fill--;
    if (fill > 0) {
        T *subst = data[fill];
        size_t idx = item->idx;
        data[idx] = subst;
        subst->idx = idx;
    }
}
+
+// Inline fn used regularly elsewhere.
+
// Round `s` up to the next power of two; values that are already powers
// of two are returned unchanged. (s == 0 yields 0 via unsigned
// wraparound, same as before.)
static inline size_t
next_power_of_two(size_t s)
{
    size_t t = s - 1;
    // Smear the highest set bit into every lower position, doubling the
    // shift each round. The loop bound adapts to the width of size_t, so
    // no preprocessor test on SIZE_MAX is needed.
    for (unsigned shift = 1; shift < sizeof(size_t) * 8; shift *= 2)
        t |= t >> shift;
    return t + 1;
}
+
+// Vectors (rust-user-code level).
+
// A vector as rust user code sees it: refcounted header followed by a
// flexible array of bytes. Always constructed via placement-new into a
// block of `alloc` total bytes (see upcall_new_vec / upcall_new_str).
struct
rust_vec : public rc_base<rust_vec>
{
    size_t alloc;   // total bytes allocated (header + data)
    size_t fill;    // bytes of data currently in use
    uint8_t data[];
    // Copies `fill` bytes from `d` when provided; `d` and `fill` must be
    // consistent (both set, or both NULL/zero).
    rust_vec(rust_dom *dom, size_t alloc, size_t fill, uint8_t const *d) :
        alloc(alloc),
        fill(fill)
    {
        if (d || fill) {
            I(dom, d);
            I(dom, fill);
            memcpy(&data[0], d, fill);
        }
    }
    ~rust_vec() {}
};
+
+// Rust types vec and str look identical from our perspective.
+typedef rust_vec rust_str;
+
+//
+// Local Variables:
+// mode: C++
+// fill-column: 78;
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// compile-command: "make -k -C .. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
+// End:
+//
+
+#endif
diff --git a/src/rt/sync/fair_ticket_lock.cpp b/src/rt/sync/fair_ticket_lock.cpp
new file mode 100644
index 00000000..0306ee1d
--- /dev/null
+++ b/src/rt/sync/fair_ticket_lock.cpp
@@ -0,0 +1,43 @@
+/*
+ * This works well as long as the number of contending threads
+ * is less than the number of processors. This is because of
+ * the fair locking scheme. If the thread that is next in line
+ * for acquiring the lock is not currently running, no other
+ * thread can acquire the lock. This is terrible for performance,
+ * and it seems that all fair locking schemes suffer from this
+ * behavior.
+ */
+
+// #define TRACE
+
+fair_ticket_lock::fair_ticket_lock() {
+ next_ticket = now_serving = 0;
+}
+
// Nothing to release; defined out-of-line to anchor the virtual dtor.
fair_ticket_lock::~fair_ticket_lock() {

}
+
// Take a ticket (atomically) and spin until it comes up for service.
// NOTE(review): now_serving is read without atomics or volatile; this
// relies on x86 coherence and the compiler re-loading it each iteration —
// confirm, or make the field atomic.
void fair_ticket_lock::lock() {
    unsigned ticket = __sync_fetch_and_add(&next_ticket, 1);
    while (now_serving != ticket) {
        pause();
    }
#ifdef TRACE
    printf("locked nextTicket: %d nowServing: %d",
           next_ticket, now_serving);
#endif
}

// Serve the next ticket. Only the lock holder calls this, so the
// non-atomic increment has a single writer.
void fair_ticket_lock::unlock() {
    now_serving++;
#ifdef TRACE
    printf("unlocked nextTicket: %d nowServing: %d",
           next_ticket, now_serving);
#endif
}

// x86 PAUSE: spin-wait hint; the memory clobber doubles as a compiler
// barrier so spinning loops re-read memory.
void fair_ticket_lock::pause() {
    asm volatile("pause\n" : : : "memory");
}
+
diff --git a/src/rt/sync/fair_ticket_lock.h b/src/rt/sync/fair_ticket_lock.h
new file mode 100644
index 00000000..c34c9041
--- /dev/null
+++ b/src/rt/sync/fair_ticket_lock.h
@@ -0,0 +1,15 @@
#ifndef FAIR_TICKET_LOCK_H
#define FAIR_TICKET_LOCK_H

// FIFO ("ticket") spin lock: threads take monotonically increasing
// tickets and are served strictly in order. Fair, but degrades badly
// when the next-in-line thread is descheduled (see the .cpp header).
class fair_ticket_lock {
    unsigned next_ticket;   // next ticket number to hand out
    unsigned now_serving;   // ticket currently allowed to hold the lock
    void pause();
public:
    fair_ticket_lock();
    virtual ~fair_ticket_lock();
    void lock();
    void unlock();
};

#endif /* FAIR_TICKET_LOCK_H */
diff --git a/src/rt/sync/lock_free_queue.cpp b/src/rt/sync/lock_free_queue.cpp
new file mode 100644
index 00000000..9d1081de
--- /dev/null
+++ b/src/rt/sync/lock_free_queue.cpp
@@ -0,0 +1,37 @@
+/*
+ * Interrupt transparent queue, Schoen et. al, "On Interrupt-Transparent
+ * Synchronization in an Embedded Object-Oriented Operating System", 2000.
+ * enqueue() is allowed to interrupt enqueue() and dequeue(), however,
+ * dequeue() is not allowed to interrupt itself.
+ */
+
+#include "lock_free_queue.h"
+
// An empty queue: tail points at the queue object itself, which doubles
// as the list head node.
// NOTE(review): the head's `next` field is never initialized here, yet
// dequeue() reads it — presumably zeroed allocation is being relied on;
// confirm, or initialize next to NULL.
lock_free_queue::lock_free_queue() :
    tail(this) {
}
+
// Append `item`. Per the file header this is interrupt-transparent:
// enqueue() may interrupt enqueue()/dequeue(); the walk forward from
// `last` picks up nodes linked by an interrupting enqueue.
void lock_free_queue::enqueue(lock_free_queue_node *item) {
    item->next = (lock_free_queue_node *) 0;
    lock_free_queue_node *last = tail;
    tail = item;
    while (last->next)
        last = last->next;
    last->next = item;
}
+
+lock_free_queue_node *lockfree_queue::dequeue() {
+ lock_free_queue_node *item = next;
+ if (item && !(next = item->next)) {
+ tail = (lock_free_queue_node *) this;
+ if (item->next) {
+ lock_free_queue_node *lost = item->next;
+ lock_free_queue_node *help;
+ do {
+ help = lost->next;
+ enqueue(lost);
+ } while ((lost = help) != (lock_free_queue_node *) 0);
+ }
+ }
+ return item;
+}
diff --git a/src/rt/sync/lock_free_queue.h b/src/rt/sync/lock_free_queue.h
new file mode 100644
index 00000000..fba4aa9a
--- /dev/null
+++ b/src/rt/sync/lock_free_queue.h
@@ -0,0 +1,15 @@
+#ifndef LOCK_FREE_QUEUE_H
+#define LOCK_FREE_QUEUE_H
+
// A queue node: just an intrusive next pointer.
class lock_free_queue_node {
public:
    lock_free_queue_node *next;
};

// FIX: the implementation (lock_free_queue.cpp) initializes `tail(this)`,
// reads an inherited `next` field, and casts `this` to a node — none of
// which the previous declaration provided. Declare the queue as a node
// (its own head) with a `tail` member so the .cpp compiles.
class lock_free_queue : public lock_free_queue_node {
    lock_free_queue_node *tail;
public:
    lock_free_queue();
    void enqueue(lock_free_queue_node *item);
    lock_free_queue_node *dequeue();
};
+
+#endif /* LOCK_FREE_QUEUE_H */
diff --git a/src/rt/sync/spin_lock.cpp b/src/rt/sync/spin_lock.cpp
new file mode 100644
index 00000000..11a5cb20
--- /dev/null
+++ b/src/rt/sync/spin_lock.cpp
@@ -0,0 +1,47 @@
+/*
+ * Your average spin lock.
+ */
+
+#include "globals.h"
+
+// #define TRACE
+
// Initialize to the unlocked state (ticket == 0) by reusing unlock().
spin_lock::spin_lock() {
    unlock();
}

spin_lock::~spin_lock() {
}

// Atomically exchange *ptr with x and return the previous value. x86
// XCHG with a memory operand has an implicit LOCK prefix, so this is a
// full atomic swap.
static inline unsigned xchg32(void *ptr, unsigned x) {
    __asm__ __volatile__("xchgl %0,%1"
                         :"=r" ((unsigned) x)
                         :"m" (*(volatile unsigned *)ptr), "0" (x)
                         :"memory");
    return x;
}
+
+void spin_lock::lock() {
+ while (true) {
+ if (!xchg32(&ticket, 1)) {
+ return;
+ }
+ while (ticket) {
+ pause();
+ }
+ }
+#ifdef TRACE
+ printf(" lock: %d", ticket);
+#endif
+}
+
// Release: a plain store of 0. Only the holder calls this.
void spin_lock::unlock() {
    ticket = 0;
#ifdef TRACE
    printf("unlock:");
#endif
}

// x86 PAUSE: spin-wait hint; the memory clobber also forces the spinning
// loop to re-read `ticket` from memory.
void spin_lock::pause() {
    asm volatile("pause\n" : : : "memory");
}
diff --git a/src/rt/sync/spin_lock.h b/src/rt/sync/spin_lock.h
new file mode 100644
index 00000000..3684c23a
--- /dev/null
+++ b/src/rt/sync/spin_lock.h
@@ -0,0 +1,14 @@
+#ifndef UNFAIR_TICKET_LOCK_H
+#define UNFAIR_TICKET_LOCK_H
+
+class spin_lock {
+ unsigned ticket;
+ void pause();
+public:
+ spin_lock();
+ virtual ~spin_lock();
+ void lock();
+ void unlock();
+};
+
+#endif /* UNFAIR_TICKET_LOCK_H */
diff --git a/src/rt/uthash/uthash.h b/src/rt/uthash/uthash.h
new file mode 100644
index 00000000..28021b61
--- /dev/null
+++ b/src/rt/uthash/uthash.h
@@ -0,0 +1,766 @@
+/*
+Copyright (c) 2003-2009, Troy D. Hanson http://uthash.sourceforge.net
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef UTHASH_H
+#define UTHASH_H
+
+#include <string.h> /* memcmp,strlen */
+#include <stddef.h> /* ptrdiff_t */
+#include <inttypes.h> /* uint32_t etc */
+
+#define UTHASH_VERSION 1.6
+
+/* C++ requires extra stringent casting */
+#if defined __cplusplus
+#define TYPEOF(x) (typeof(x))
+#else
+#define TYPEOF(x)
+#endif
+
+
+#define uthash_fatal(msg) exit(-1) /* fatal error (out of memory,etc) */
+#define uthash_bkt_malloc(sz) malloc(sz) /* malloc fcn for UT_hash_bucket's */
+#define uthash_bkt_free(ptr) free(ptr) /* free fcn for UT_hash_bucket's */
+#define uthash_tbl_malloc(sz) malloc(sz) /* malloc fcn for UT_hash_table */
+#define uthash_tbl_free(ptr) free(ptr) /* free fcn for UT_hash_table */
+
+#define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */
+#define uthash_expand_fyi(tbl) /* can be defined to log expands */
+
+/* initial number of buckets */
+#define HASH_INITIAL_NUM_BUCKETS 32 /* initial number of buckets */
+#define HASH_INITIAL_NUM_BUCKETS_LOG2 5 /* lg2 of initial number of buckets */
+#define HASH_BKT_CAPACITY_THRESH 10 /* expand when bucket count reaches */
+
+/* calculate the element whose hash handle address is hhe */
+#define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)hhp) - (tbl)->hho))
+
+#define HASH_FIND(hh,head,keyptr,keylen,out) \
+do { \
+ unsigned _hf_bkt,_hf_hashv; \
+ out=TYPEOF(out)head; \
+ if (head) { \
+ HASH_FCN(keyptr,keylen, (head)->hh.tbl->num_buckets, _hf_hashv, _hf_bkt); \
+ HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], \
+ keyptr,keylen,out); \
+ } \
+} while (0)
+
+#define HASH_MAKE_TABLE(hh,head) \
+do { \
+ (head)->hh.tbl = (UT_hash_table*)uthash_tbl_malloc( \
+ sizeof(UT_hash_table)); \
+ if (!((head)->hh.tbl)) { uthash_fatal( "out of memory"); } \
+ memset((head)->hh.tbl, 0, sizeof(UT_hash_table)); \
+ (head)->hh.tbl->tail = &((head)->hh); \
+ (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \
+ (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \
+ (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \
+ (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_bkt_malloc( \
+ HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
+ if (! (head)->hh.tbl->buckets) { uthash_fatal( "out of memory"); } \
+ memset((head)->hh.tbl->buckets, 0, \
+ HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
+} while(0)
+
+#define HASH_ADD(hh,head,fieldname,keylen_in,add) \
+ HASH_ADD_KEYPTR(hh,head,&add->fieldname,keylen_in,add)
+
+#define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \
+do { \
+ unsigned _ha_bkt; \
+ (add)->hh.next = NULL; \
+ (add)->hh.key = (char*)keyptr; \
+ (add)->hh.keylen = keylen_in; \
+ if (!(head)) { \
+ head = (add); \
+ (head)->hh.prev = NULL; \
+ HASH_MAKE_TABLE(hh,head); \
+ } else { \
+ (head)->hh.tbl->tail->next = (add); \
+ (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \
+ (head)->hh.tbl->tail = &((add)->hh); \
+ } \
+ (head)->hh.tbl->num_items++; \
+ (add)->hh.tbl = (head)->hh.tbl; \
+ HASH_FCN(keyptr,keylen_in, (head)->hh.tbl->num_buckets, \
+ (add)->hh.hashv, _ha_bkt); \
+ HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt],&(add)->hh); \
+ HASH_EMIT_KEY(hh,head,keyptr,keylen_in); \
+ HASH_FSCK(hh,head); \
+} while(0)
+
+#define HASH_TO_BKT( hashv, num_bkts, bkt ) \
+do { \
+ bkt = ((hashv) & ((num_bkts) - 1)); \
+} while(0)
+
+/* delete "delptr" from the hash table.
+ * "the usual" patch-up process for the app-order doubly-linked-list.
+ * The use of _hd_hh_del below deserves special explanation.
+ * These used to be expressed using (delptr) but that led to a bug
+ * if someone used the same symbol for the head and deletee, like
+ * HASH_DELETE(hh,users,users);
+ * We want that to work, but by changing the head (users) below
+ * we were forfeiting our ability to further refer to the deletee (users)
+ * in the patch-up process. Solution: use scratch space in the table to
+ * copy the deletee pointer, then the latter references are via that
+ * scratch pointer rather than through the repointed (users) symbol.
+ */
+#define HASH_DELETE(hh,head,delptr) \
+do { \
+ unsigned _hd_bkt; \
+ struct UT_hash_handle *_hd_hh_del; \
+ if ( ((delptr)->hh.prev == NULL) && ((delptr)->hh.next == NULL) ) { \
+ uthash_bkt_free((head)->hh.tbl->buckets ); \
+ uthash_tbl_free((head)->hh.tbl); \
+ head = NULL; \
+ } else { \
+ _hd_hh_del = &((delptr)->hh); \
+ if ((delptr) == ELMT_FROM_HH((head)->hh.tbl,(head)->hh.tbl->tail)) { \
+ (head)->hh.tbl->tail = \
+ (UT_hash_handle*)((char*)((delptr)->hh.prev) + \
+ (head)->hh.tbl->hho); \
+ } \
+ if ((delptr)->hh.prev) { \
+ ((UT_hash_handle*)((char*)((delptr)->hh.prev) + \
+ (head)->hh.tbl->hho))->next = (delptr)->hh.next; \
+ } else { \
+ head = TYPEOF(head)((delptr)->hh.next); \
+ } \
+ if (_hd_hh_del->next) { \
+ ((UT_hash_handle*)((char*)_hd_hh_del->next + \
+ (head)->hh.tbl->hho))->prev = \
+ _hd_hh_del->prev; \
+ } \
+ HASH_TO_BKT( _hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \
+ HASH_DEL_IN_BKT(hh,(head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \
+ (head)->hh.tbl->num_items--; \
+ } \
+ HASH_FSCK(hh,head); \
+} while (0)
+
+
+/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL */
+#define HASH_FIND_STR(head,findstr,out) \
+ HASH_FIND(hh,head,findstr,strlen(findstr),out)
+#define HASH_ADD_STR(head,strfield,add) \
+ HASH_ADD(hh,head,strfield,strlen(add->strfield),add)
+#define HASH_FIND_INT(head,findint,out) \
+ HASH_FIND(hh,head,findint,sizeof(int),out)
+#define HASH_ADD_INT(head,intfield,add) \
+ HASH_ADD(hh,head,intfield,sizeof(int),add)
+#define HASH_DEL(head,delptr) \
+ HASH_DELETE(hh,head,delptr)
+
+/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined.
+ * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined.
+ */
+#ifdef HASH_DEBUG
+#define HASH_OOPS(...) do { fprintf(stderr,__VA_ARGS__); exit(-1); } while (0)
+#define HASH_FSCK(hh,head) \
+do { \
+ unsigned _bkt_i; \
+ unsigned _count, _bkt_count; \
+ char *_prev; \
+ struct UT_hash_handle *_thh; \
+ if (head) { \
+ _count = 0; \
+ for( _bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; _bkt_i++) { \
+ _bkt_count = 0; \
+ _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \
+ _prev = NULL; \
+ while (_thh) { \
+ if (_prev != (char*)(_thh->hh_prev)) { \
+ HASH_OOPS("invalid hh_prev %p, actual %p\n", \
+ _thh->hh_prev, _prev ); \
+ } \
+ _bkt_count++; \
+ _prev = (char*)(_thh); \
+ _thh = _thh->hh_next; \
+ } \
+ _count += _bkt_count; \
+ if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \
+ HASH_OOPS("invalid bucket count %d, actual %d\n", \
+ (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \
+ } \
+ } \
+ if (_count != (head)->hh.tbl->num_items) { \
+ HASH_OOPS("invalid hh item count %d, actual %d\n", \
+ (head)->hh.tbl->num_items, _count ); \
+ } \
+ /* traverse hh in app order; check next/prev integrity, count */ \
+ _count = 0; \
+ _prev = NULL; \
+ _thh = &(head)->hh; \
+ while (_thh) { \
+ _count++; \
+ if (_prev !=(char*)(_thh->prev)) { \
+ HASH_OOPS("invalid prev %p, actual %p\n", \
+ _thh->prev, _prev ); \
+ } \
+ _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \
+ _thh = ( _thh->next ? (UT_hash_handle*)((char*)(_thh->next) + \
+ (head)->hh.tbl->hho) : NULL ); \
+ } \
+ if (_count != (head)->hh.tbl->num_items) { \
+ HASH_OOPS("invalid app item count %d, actual %d\n", \
+ (head)->hh.tbl->num_items, _count ); \
+ } \
+ } \
+} while (0)
+#else
+#define HASH_FSCK(hh,head)
+#endif
+
+/* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to
+ * the descriptor to which this macro is defined for tuning the hash function.
+ * The app can #include <unistd.h> to get the prototype for write(2). */
+#ifdef HASH_EMIT_KEYS
+#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \
+do { \
+ unsigned _klen = fieldlen; \
+ write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \
+ write(HASH_EMIT_KEYS, keyptr, fieldlen); \
+} while (0)
+#else
+#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen)
+#endif
+
+/* default to MurmurHash unless overridden e.g. DHASH_FUNCTION=HASH_SAX */
+#ifdef HASH_FUNCTION
+#define HASH_FCN HASH_FUNCTION
+#else
+#define HASH_FCN HASH_MUR
+#endif
+
+/* The Bernstein hash function, used in Perl prior to v5.6 */
+#define HASH_BER(key,keylen,num_bkts,hashv,bkt) \
+do { \
+ unsigned _hb_keylen=keylen; \
+ char *_hb_key=(char*)key; \
+ (hashv) = 0; \
+ while (_hb_keylen--) { (hashv) = ((hashv) * 33) + *_hb_key++; } \
+ bkt = (hashv) & (num_bkts-1); \
+} while (0)
+
+
+/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at
+ * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */
+#define HASH_SAX(key,keylen,num_bkts,hashv,bkt) \
+do { \
+ unsigned _sx_i; \
+ char *_hs_key=(char*)key; \
+ hashv = 0; \
+ for(_sx_i=0; _sx_i < keylen; _sx_i++) \
+ hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i]; \
+ bkt = hashv & (num_bkts-1); \
+} while (0)
+
+#define HASH_FNV(key,keylen,num_bkts,hashv,bkt) \
+do { \
+ unsigned _fn_i; \
+ char *_hf_key=(char*)key; \
+ hashv = 2166136261UL; \
+ for(_fn_i=0; _fn_i < keylen; _fn_i++) \
+ hashv = (hashv * 16777619) ^ _hf_key[_fn_i]; \
+ bkt = hashv & (num_bkts-1); \
+} while(0);
+
+#define HASH_OAT(key,keylen,num_bkts,hashv,bkt) \
+do { \
+ unsigned _ho_i; \
+ char *_ho_key=(char*)key; \
+ hashv = 0; \
+ for(_ho_i=0; _ho_i < keylen; _ho_i++) { \
+ hashv += _ho_key[_ho_i]; \
+ hashv += (hashv << 10); \
+ hashv ^= (hashv >> 6); \
+ } \
+ hashv += (hashv << 3); \
+ hashv ^= (hashv >> 11); \
+ hashv += (hashv << 15); \
+ bkt = hashv & (num_bkts-1); \
+} while(0)
+
+#define HASH_JEN_MIX(a,b,c) \
+do { \
+ a -= b; a -= c; a ^= ( c >> 13 ); \
+ b -= c; b -= a; b ^= ( a << 8 ); \
+ c -= a; c -= b; c ^= ( b >> 13 ); \
+ a -= b; a -= c; a ^= ( c >> 12 ); \
+ b -= c; b -= a; b ^= ( a << 16 ); \
+ c -= a; c -= b; c ^= ( b >> 5 ); \
+ a -= b; a -= c; a ^= ( c >> 3 ); \
+ b -= c; b -= a; b ^= ( a << 10 ); \
+ c -= a; c -= b; c ^= ( b >> 15 ); \
+} while (0)
+
+#define HASH_JEN(key,keylen,num_bkts,hashv,bkt) \
+do { \
+ unsigned _hj_i,_hj_j,_hj_k; \
+ char *_hj_key=(char*)key; \
+ hashv = 0xfeedbeef; \
+ _hj_i = _hj_j = 0x9e3779b9; \
+ _hj_k = keylen; \
+ while (_hj_k >= 12) { \
+ _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \
+ + ( (unsigned)_hj_key[2] << 16 ) \
+ + ( (unsigned)_hj_key[3] << 24 ) ); \
+ _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \
+ + ( (unsigned)_hj_key[6] << 16 ) \
+ + ( (unsigned)_hj_key[7] << 24 ) ); \
+ hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \
+ + ( (unsigned)_hj_key[10] << 16 ) \
+ + ( (unsigned)_hj_key[11] << 24 ) ); \
+ \
+ HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
+ \
+ _hj_key += 12; \
+ _hj_k -= 12; \
+ } \
+ hashv += keylen; \
+ switch ( _hj_k ) { \
+ case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); \
+ case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); \
+ case 9: hashv += ( (unsigned)_hj_key[8] << 8 ); \
+ case 8: _hj_j += ( (unsigned)_hj_key[7] << 24 ); \
+ case 7: _hj_j += ( (unsigned)_hj_key[6] << 16 ); \
+ case 6: _hj_j += ( (unsigned)_hj_key[5] << 8 ); \
+ case 5: _hj_j += _hj_key[4]; \
+ case 4: _hj_i += ( (unsigned)_hj_key[3] << 24 ); \
+ case 3: _hj_i += ( (unsigned)_hj_key[2] << 16 ); \
+ case 2: _hj_i += ( (unsigned)_hj_key[1] << 8 ); \
+ case 1: _hj_i += _hj_key[0]; \
+ } \
+ HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
+ bkt = hashv & (num_bkts-1); \
+} while(0)
+
+/* The Paul Hsieh hash function */
+#undef get16bits
+#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
+ || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
+#define get16bits(d) (*((const uint16_t *) (d)))
+#endif
+
+#if !defined (get16bits)
+#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8)\
+ +(uint32_t)(((const uint8_t *)(d))[0]) )
+#endif
+#define HASH_SFH(key,keylen,num_bkts,hashv,bkt) \
+do { \
+ char *_sfh_key=(char*)key; \
+ hashv = 0xcafebabe; \
+ uint32_t _sfh_tmp, _sfh_len = keylen; \
+ \
+ int _sfh_rem = _sfh_len & 3; \
+ _sfh_len >>= 2; \
+ \
+ /* Main loop */ \
+ for (;_sfh_len > 0; _sfh_len--) { \
+ hashv += get16bits (_sfh_key); \
+ _sfh_tmp = (get16bits (_sfh_key+2) << 11) ^ hashv; \
+ hashv = (hashv << 16) ^ _sfh_tmp; \
+ _sfh_key += 2*sizeof (uint16_t); \
+ hashv += hashv >> 11; \
+ } \
+ \
+ /* Handle end cases */ \
+ switch (_sfh_rem) { \
+ case 3: hashv += get16bits (_sfh_key); \
+ hashv ^= hashv << 16; \
+ hashv ^= _sfh_key[sizeof (uint16_t)] << 18; \
+ hashv += hashv >> 11; \
+ break; \
+ case 2: hashv += get16bits (_sfh_key); \
+ hashv ^= hashv << 11; \
+ hashv += hashv >> 17; \
+ break; \
+ case 1: hashv += *_sfh_key; \
+ hashv ^= hashv << 10; \
+ hashv += hashv >> 1; \
+ } \
+ \
+ /* Force "avalanching" of final 127 bits */ \
+ hashv ^= hashv << 3; \
+ hashv += hashv >> 5; \
+ hashv ^= hashv << 4; \
+ hashv += hashv >> 17; \
+ hashv ^= hashv << 25; \
+ hashv += hashv >> 6; \
+ bkt = hashv & (num_bkts-1); \
+} while(0);
+
+/* Austin Appleby's MurmurHash */
+#define HASH_MUR(key,keylen,num_bkts,hashv,bkt) \
+do { \
+ const unsigned int _mur_m = 0x5bd1e995; \
+ const int _mur_r = 24; \
+ hashv = 0xcafebabe ^ keylen; \
+ char *_mur_key = (char *)key; \
+ uint32_t _mur_tmp, _mur_len = keylen; \
+ \
+ for (;_mur_len >= 4; _mur_len-=4) { \
+ _mur_tmp = *(uint32_t *)_mur_key; \
+ _mur_tmp *= _mur_m; \
+ _mur_tmp ^= _mur_tmp >> _mur_r; \
+ _mur_tmp *= _mur_m; \
+ hashv *= _mur_m; \
+ hashv ^= _mur_tmp; \
+ _mur_key += 4; \
+ } \
+ \
+ switch(_mur_len) \
+ { \
+ case 3: hashv ^= _mur_key[2] << 16; \
+ case 2: hashv ^= _mur_key[1] << 8; \
+ case 1: hashv ^= _mur_key[0]; \
+ hashv *= _mur_m; \
+ }; \
+ \
+ hashv ^= hashv >> 13; \
+ hashv *= _mur_m; \
+ hashv ^= hashv >> 15; \
+ \
+ bkt = hashv & (num_bkts-1); \
+} while(0)
+
+/* key comparison function; return 0 if keys equal */
+#define HASH_KEYCMP(a,b,len) memcmp(a,b,len)
+
+/* iterate over items in a known bucket to find desired item */
+#define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,out) \
+out = TYPEOF(out)((head.hh_head) ? ELMT_FROM_HH(tbl,head.hh_head) : NULL); \
+while (out) { \
+ if (out->hh.keylen == keylen_in) { \
+ if ((HASH_KEYCMP(out->hh.key,keyptr,keylen_in)) == 0) break; \
+ } \
+ out= TYPEOF(out)((out->hh.hh_next) ? \
+ ELMT_FROM_HH(tbl,out->hh.hh_next) : NULL); \
+}
+
+/* add an item to a bucket */
+#define HASH_ADD_TO_BKT(head,addhh) \
+do { \
+ head.count++; \
+ (addhh)->hh_next = head.hh_head; \
+ (addhh)->hh_prev = NULL; \
+ if (head.hh_head) { (head).hh_head->hh_prev = (addhh); } \
+ (head).hh_head=addhh; \
+ if (head.count >= ((head.expand_mult+1) * HASH_BKT_CAPACITY_THRESH) \
+ && (addhh)->tbl->noexpand != 1) { \
+ HASH_EXPAND_BUCKETS((addhh)->tbl); \
+ } \
+} while(0)
+
+/* remove an item from a given bucket */
+#define HASH_DEL_IN_BKT(hh,head,hh_del) \
+ (head).count--; \
+ if ((head).hh_head == hh_del) { \
+ (head).hh_head = hh_del->hh_next; \
+ } \
+ if (hh_del->hh_prev) { \
+ hh_del->hh_prev->hh_next = hh_del->hh_next; \
+ } \
+ if (hh_del->hh_next) { \
+ hh_del->hh_next->hh_prev = hh_del->hh_prev; \
+ }
+
+/* Bucket expansion has the effect of doubling the number of buckets
+ * and redistributing the items into the new buckets. Ideally the
+ * items will distribute more or less evenly into the new buckets
+ * (the extent to which this is true is a measure of the quality of
+ * the hash function as it applies to the key domain).
+ *
+ * With the items distributed into more buckets, the chain length
+ * (item count) in each bucket is reduced. Thus by expanding buckets
+ * the hash keeps a bound on the chain length. This bounded chain
+ * length is the essence of how a hash provides constant time lookup.
+ *
+ * The calculation of tbl->ideal_chain_maxlen below deserves some
+ * explanation. First, keep in mind that we're calculating the ideal
+ * maximum chain length based on the *new* (doubled) bucket count.
+ * In fractions this is just n/b (n=number of items,b=new num buckets).
+ * Since the ideal chain length is an integer, we want to calculate
+ * ceil(n/b). We don't depend on floating point arithmetic in this
+ * hash, so to calculate ceil(n/b) with integers we could write
+ *
+ * ceil(n/b) = (n/b) + ((n%b)?1:0)
+ *
+ * and in fact a previous version of this hash did just that.
+ * But now we have improved things a bit by recognizing that b is
+ * always a power of two. We keep its base 2 log handy (call it lb),
+ * so now we can write this with a bit shift and logical AND:
+ *
+ * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0)
+ *
+ */
+#define HASH_EXPAND_BUCKETS(tbl) \
+do { \
+ unsigned _he_bkt; \
+ unsigned _he_bkt_i; \
+ struct UT_hash_handle *_he_thh, *_he_hh_nxt; \
+ UT_hash_bucket *_he_new_buckets, *_he_newbkt; \
+ _he_new_buckets = (UT_hash_bucket*)uthash_bkt_malloc( \
+ 2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
+ if (!_he_new_buckets) { uthash_fatal( "out of memory"); } \
+ memset(_he_new_buckets, 0, \
+ 2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
+ tbl->ideal_chain_maxlen = \
+ (tbl->num_items >> (tbl->log2_num_buckets+1)) + \
+ ((tbl->num_items & ((tbl->num_buckets*2)-1)) ? 1 : 0); \
+ tbl->nonideal_items = 0; \
+ for(_he_bkt_i = 0; _he_bkt_i < tbl->num_buckets; _he_bkt_i++) \
+ { \
+ _he_thh = tbl->buckets[ _he_bkt_i ].hh_head; \
+ while (_he_thh) { \
+ _he_hh_nxt = _he_thh->hh_next; \
+ HASH_TO_BKT( _he_thh->hashv, tbl->num_buckets*2, _he_bkt); \
+ _he_newbkt = &(_he_new_buckets[ _he_bkt ]); \
+ if (++(_he_newbkt->count) > tbl->ideal_chain_maxlen) { \
+ tbl->nonideal_items++; \
+ _he_newbkt->expand_mult = _he_newbkt->count / \
+ tbl->ideal_chain_maxlen; \
+ } \
+ _he_thh->hh_prev = NULL; \
+ _he_thh->hh_next = _he_newbkt->hh_head; \
+ if (_he_newbkt->hh_head) _he_newbkt->hh_head->hh_prev = \
+ _he_thh; \
+ _he_newbkt->hh_head = _he_thh; \
+ _he_thh = _he_hh_nxt; \
+ } \
+ } \
+ tbl->num_buckets *= 2; \
+ tbl->log2_num_buckets++; \
+ uthash_bkt_free( tbl->buckets ); \
+ tbl->buckets = _he_new_buckets; \
+ tbl->ineff_expands = (tbl->nonideal_items > (tbl->num_items >> 1)) ? \
+ (tbl->ineff_expands+1) : 0; \
+ if (tbl->ineff_expands > 1) { \
+ tbl->noexpand=1; \
+ uthash_noexpand_fyi(tbl); \
+ } \
+ uthash_expand_fyi(tbl); \
+} while(0)
+
+
+/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */
+/* Note that HASH_SORT assumes the hash handle name to be hh.
+ * HASH_SRT was added to allow the hash handle name to be passed in. */
+#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn)
+#define HASH_SRT(hh,head,cmpfcn) \
+do { \
+ unsigned _hs_i; \
+ unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \
+ struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \
+ if (head) { \
+ _hs_insize = 1; \
+ _hs_looping = 1; \
+ _hs_list = &((head)->hh); \
+ while (_hs_looping) { \
+ _hs_p = _hs_list; \
+ _hs_list = NULL; \
+ _hs_tail = NULL; \
+ _hs_nmerges = 0; \
+ while (_hs_p) { \
+ _hs_nmerges++; \
+ _hs_q = _hs_p; \
+ _hs_psize = 0; \
+ for ( _hs_i = 0; _hs_i < _hs_insize; _hs_i++ ) { \
+ _hs_psize++; \
+ _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
+ ((void*)((char*)(_hs_q->next) + \
+ (head)->hh.tbl->hho)) : NULL); \
+ if (! (_hs_q) ) break; \
+ } \
+ _hs_qsize = _hs_insize; \
+ while ((_hs_psize > 0) || ((_hs_qsize > 0) && _hs_q )) { \
+ if (_hs_psize == 0) { \
+ _hs_e = _hs_q; \
+ _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
+ ((void*)((char*)(_hs_q->next) + \
+ (head)->hh.tbl->hho)) : NULL); \
+ _hs_qsize--; \
+ } else if ( (_hs_qsize == 0) || !(_hs_q) ) { \
+ _hs_e = _hs_p; \
+ _hs_p = (UT_hash_handle*)((_hs_p->next) ? \
+ ((void*)((char*)(_hs_p->next) + \
+ (head)->hh.tbl->hho)) : NULL); \
+ _hs_psize--; \
+ } else if (( \
+ cmpfcn(TYPEOF(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_p)), \
+ TYPEOF(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_q))) \
+ ) <= 0) { \
+ _hs_e = _hs_p; \
+ _hs_p = (UT_hash_handle*)((_hs_p->next) ? \
+ ((void*)((char*)(_hs_p->next) + \
+ (head)->hh.tbl->hho)) : NULL); \
+ _hs_psize--; \
+ } else { \
+ _hs_e = _hs_q; \
+ _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
+ ((void*)((char*)(_hs_q->next) + \
+ (head)->hh.tbl->hho)) : NULL); \
+ _hs_qsize--; \
+ } \
+ if ( _hs_tail ) { \
+ _hs_tail->next = ((_hs_e) ? \
+ ELMT_FROM_HH((head)->hh.tbl,_hs_e) : NULL); \
+ } else { \
+ _hs_list = _hs_e; \
+ } \
+ _hs_e->prev = ((_hs_tail) ? \
+ ELMT_FROM_HH((head)->hh.tbl,_hs_tail) : NULL); \
+ _hs_tail = _hs_e; \
+ } \
+ _hs_p = _hs_q; \
+ } \
+ _hs_tail->next = NULL; \
+ if ( _hs_nmerges <= 1 ) { \
+ _hs_looping=0; \
+ (head)->hh.tbl->tail = _hs_tail; \
+ (head) = TYPEOF(head)ELMT_FROM_HH((head)->hh.tbl, _hs_list); \
+ } \
+ _hs_insize *= 2; \
+ } \
+ HASH_FSCK(hh,head); \
+ } \
+} while (0)
+
+/* This function selects items from one hash into another hash.
+ * The end result is that the selected items have dual presence
+ * in both hashes. There is no copy of the items made; rather
+ * they are added into the new hash through a secondary hash
+ * hash handle that must be present in the structure. */
+#define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \
+do { \
+ unsigned _src_bkt, _dst_bkt; \
+ void *_last_elt=NULL, *_elt; \
+ UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \
+ ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \
+ if (src) { \
+ for(_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \
+ for(_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \
+ _src_hh; \
+ _src_hh = _src_hh->hh_next) { \
+ _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \
+ if (cond(_elt)) { \
+ _dst_hh = (UT_hash_handle*)(((char*)_elt) + _dst_hho); \
+ _dst_hh->key = _src_hh->key; \
+ _dst_hh->keylen = _src_hh->keylen; \
+ _dst_hh->hashv = _src_hh->hashv; \
+ _dst_hh->prev = _last_elt; \
+ _dst_hh->next = NULL; \
+ if (_last_elt_hh) { _last_elt_hh->next = _elt; } \
+ if (!dst) { \
+ dst = TYPEOF(dst)_elt; \
+ HASH_MAKE_TABLE(hh_dst,dst); \
+ } else { \
+ _dst_hh->tbl = (dst)->hh_dst.tbl; \
+ } \
+ HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \
+ HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt],_dst_hh); \
+ (dst)->hh_dst.tbl->num_items++; \
+ _last_elt = _elt; \
+ _last_elt_hh = _dst_hh; \
+ } \
+ } \
+ } \
+ } \
+ HASH_FSCK(hh_dst,dst); \
+} while (0)
+
+#define HASH_CLEAR(hh,head) \
+do { \
+ if (head) { \
+ uthash_bkt_free((head)->hh.tbl->buckets ); \
+ uthash_tbl_free((head)->hh.tbl); \
+ (head)=NULL; \
+ } \
+} while(0)
+
+/* obtain a count of items in the hash */
+#define HASH_COUNT(head) HASH_CNT(hh,head)
+#define HASH_CNT(hh,head) (head?(head->hh.tbl->num_items):0)
+
+typedef struct UT_hash_bucket {
+ struct UT_hash_handle *hh_head;
+ unsigned count;
+
+ /* expand_mult is normally set to 0. In this situation, the max chain length
+ * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If
+ * the bucket's chain exceeds this length, bucket expansion is triggered).
+ * However, setting expand_mult to a non-zero value delays bucket expansion
+ * (that would be triggered by additions to this particular bucket)
+ * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH.
+ * (The multiplier is simply expand_mult+1). The whole idea of this
+ * multiplier is to reduce bucket expansions, since they are expensive, in
+ * situations where we know that a particular bucket tends to be overused.
+ * It is better to let its chain length grow to a longer yet-still-bounded
+ * value, than to do an O(n) bucket expansion too often.
+ */
+ unsigned expand_mult;
+
+} UT_hash_bucket;
+
+typedef struct UT_hash_table {
+ UT_hash_bucket *buckets;
+ unsigned num_buckets, log2_num_buckets;
+ unsigned num_items;
+ struct UT_hash_handle *tail; /* tail hh in app order, for fast append */
+   ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element) */
+
+ /* in an ideal situation (all buckets used equally), no bucket would have
+ * more than ceil(#items/#buckets) items. that's the ideal chain length. */
+ unsigned ideal_chain_maxlen;
+
+ /* nonideal_items is the number of items in the hash whose chain position
+ * exceeds the ideal chain maxlen. these items pay the penalty for an uneven
+ * hash distribution; reaching them in a chain traversal takes >ideal steps */
+ unsigned nonideal_items;
+
+ /* ineffective expands occur when a bucket doubling was performed, but
+ * afterward, more than half the items in the hash had nonideal chain
+ * positions. If this happens on two consecutive expansions we inhibit any
+ * further expansion, as it's not helping; this happens when the hash
+ * function isn't a good fit for the key domain. When expansion is inhibited
+ * the hash will still work, albeit no longer in constant time. */
+ unsigned ineff_expands, noexpand;
+
+
+} UT_hash_table;
+
+
+typedef struct UT_hash_handle {
+ struct UT_hash_table *tbl;
+ void *prev; /* prev element in app order */
+ void *next; /* next element in app order */
+ struct UT_hash_handle *hh_prev; /* previous hh in bucket order */
+ struct UT_hash_handle *hh_next; /* next hh in bucket order */
+ void *key; /* ptr to enclosing struct's key */
+ unsigned keylen; /* enclosing struct's key len */
+ unsigned hashv; /* result of hash-fcn(key) */
+} UT_hash_handle;
+
+#endif /* UTHASH_H */
diff --git a/src/rt/uthash/utlist.h b/src/rt/uthash/utlist.h
new file mode 100644
index 00000000..a33615e1
--- /dev/null
+++ b/src/rt/uthash/utlist.h
@@ -0,0 +1,280 @@
+/*
+Copyright (c) 2007-2009, Troy D. Hanson
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef UTLIST_H
+#define UTLIST_H
+
+#define UTLIST_VERSION 1.0
+
+/* C++ requires extra stringent casting */
+/* NOTE(review): typeof is a GCC extension, so the C++ branch only builds on
+ * GCC-compatible compilers -- confirm against the supported toolchains. */
+#if defined __cplusplus
+#define LTYPEOF(x) (typeof(x))
+#else
+#define LTYPEOF(x)
+#endif
+/*
+ * This file contains macros to manipulate singly and doubly-linked lists.
+ *
+ * 1. LL_ macros: singly-linked lists.
+ * 2. DL_ macros: doubly-linked lists.
+ * 3. CDL_ macros: circular doubly-linked lists.
+ *
+ * To use singly-linked lists, your structure must have a "next" pointer.
+ * To use doubly-linked lists, your structure must have "prev" and "next" pointers.
+ * Either way, the pointer to the head of the list must be initialized to NULL.
+ *
+ * ---------------- EXAMPLE -------------------------
+ * struct item {
+ * int id;
+ * struct item *prev, *next;
+ * }
+ *
+ * struct item *list = NULL;
+ *
+ * int main() {
+ * struct item *item;
+ * ... allocate and populate item ...
+ * DL_APPEND(list, item);
+ * }
+ * --------------------------------------------------
+ *
+ * For doubly-linked lists, the append and delete macros are O(1)
+ * For singly-linked lists, append and delete are O(n) but prepend is O(1)
+ * The sort macro is O(n log(n)) for all types of single/double/circular lists.
+ */
+
+/******************************************************************************
+ * The SORT macros *
+ *****************************************************************************/
+/* In-place sort. cmp receives two pointers to elements and returns <0, 0 or
+ * >0, strcmp-style. */
+#define LL_SORT(l,cmp) \
+ LISTSORT(l,0,0,FIELD_OFFSET(l,next),cmp)
+/* NOTE(review): LISTSORT treats po!=0 as "doubly linked"; a struct whose
+ * "prev" member sits at offset 0 would be mis-detected -- confirm callers. */
+#define DL_SORT(l,cmp) \
+ LISTSORT(l,0,FIELD_OFFSET(l,prev),FIELD_OFFSET(l,next),cmp)
+#define CDL_SORT(l,cmp) \
+ LISTSORT(l,1,FIELD_OFFSET(l,prev),FIELD_OFFSET(l,next),cmp)
+
+/* The macros can't assume or cast to the caller's list element type. So we use
+ * a couple tricks when we need to deal with those element's prev/next pointers.
+ * Basically we use char pointer arithmetic to get those field offsets. */
+#define FIELD_OFFSET(ptr,field) ((char*)&((ptr)->field) - (char*)(ptr))
+/* LNEXT/LPREV yield an lvalue for the pointer stored at byte offset no/po
+ * inside element e. */
+#define LNEXT(e,no) (*(char**)(((char*)e) + no))
+#define LPREV(e,po) (*(char**)(((char*)e) + po))
+/******************************************************************************
+ * The LISTSORT macro is an adaptation of Simon Tatham's O(n log(n)) mergesort*
+ * Unwieldy variable names used here to avoid shadowing passed-in variables. *
+ *****************************************************************************/
+/* Bottom-up mergesort: repeatedly merges runs of length _ls_insize, doubling
+ * the run length each pass until a pass performs at most one merge. */
+#define LISTSORT(list, is_circular, po, no, cmp) \
+do { \
+ void *_ls_p, *_ls_q, *_ls_e, *_ls_tail, *_ls_oldhead; \
+ int _ls_insize, _ls_nmerges, _ls_psize, _ls_qsize, _ls_i, _ls_looping; \
+ int _ls_is_double = (po==0) ? 0 : 1; \
+ if (list) { \
+ _ls_insize = 1; \
+ _ls_looping = 1; \
+ while (_ls_looping) { \
+ _ls_p = list; \
+ _ls_oldhead = list; \
+ list = NULL; \
+ _ls_tail = NULL; \
+ _ls_nmerges = 0; \
+ while (_ls_p) { \
+ _ls_nmerges++; \
+ _ls_q = _ls_p; \
+ _ls_psize = 0; \
+ for (_ls_i = 0; _ls_i < _ls_insize; _ls_i++) { \
+ _ls_psize++; \
+ if (is_circular) { \
+ _ls_q = ((LNEXT(_ls_q,no) == _ls_oldhead) ? NULL : LNEXT(_ls_q,no)); \
+ } else { \
+ _ls_q = LNEXT(_ls_q,no); \
+ } \
+ if (!_ls_q) break; \
+ } \
+ _ls_qsize = _ls_insize; \
+ while (_ls_psize > 0 || (_ls_qsize > 0 && _ls_q)) { \
+ if (_ls_psize == 0) { \
+ _ls_e = _ls_q; _ls_q = LNEXT(_ls_q,no); _ls_qsize--; \
+ if (is_circular && _ls_q == _ls_oldhead) { _ls_q = NULL; } \
+ } else if (_ls_qsize == 0 || !_ls_q) { \
+ _ls_e = _ls_p; _ls_p = LNEXT(_ls_p,no); _ls_psize--; \
+ if (is_circular && (_ls_p == _ls_oldhead)) { _ls_p = NULL; } \
+ } else if (cmp(LTYPEOF(list)_ls_p,LTYPEOF(list)_ls_q) <= 0) { \
+ _ls_e = _ls_p; _ls_p = LNEXT(_ls_p,no); _ls_psize--; \
+ if (is_circular && (_ls_p == _ls_oldhead)) { _ls_p = NULL; } \
+ } else { \
+ _ls_e = _ls_q; _ls_q = LNEXT(_ls_q,no); _ls_qsize--; \
+ if (is_circular && (_ls_q == _ls_oldhead)) { _ls_q = NULL; } \
+ } \
+ if (_ls_tail) { \
+ LNEXT(_ls_tail,no) = (char*)_ls_e; \
+ } else { \
+ list = LTYPEOF(list)_ls_e; \
+ } \
+ if (_ls_is_double) { \
+ LPREV(_ls_e,po) = (char*)_ls_tail; \
+ } \
+ _ls_tail = _ls_e; \
+ } \
+ _ls_p = _ls_q; \
+ } \
+ if (is_circular) { \
+ LNEXT(_ls_tail,no) = (char*)list; \
+ if (_ls_is_double) { \
+ LPREV(list,po) = (char*)_ls_tail; \
+ } \
+ } else { \
+ LNEXT(_ls_tail,no) = NULL; \
+ } \
+ if (_ls_nmerges <= 1) { \
+ _ls_looping=0; \
+ } \
+ _ls_insize *= 2; \
+ } \
+ } \
+} while (0)
+
+/******************************************************************************
+ * singly linked list macros (non-circular) *
+ *****************************************************************************/
+/* Insert add at the front of singly-linked list head; O(1). */
+#define LL_PREPEND(head,add) \
+do { \
+ (add)->next = head; \
+ head = add; \
+} while (0)
+
+/* Append add to singly-linked list head; O(n) walk to the tail via the
+ * char-pointer/offset trick so the element type need not be known. */
+#define LL_APPEND(head,add) \
+do { \
+ (add)->next=NULL; \
+ if (head) { \
+ char *_lla_el = (char*)(head); \
+ unsigned _lla_no = FIELD_OFFSET(head,next); \
+ while (LNEXT(_lla_el,_lla_no)) { _lla_el = LNEXT(_lla_el,_lla_no); } \
+ LNEXT(_lla_el,_lla_no)=(char*)(add); \
+ } else { \
+ (head)=(add); \
+ } \
+} while (0)
+
+/* Unlink del from singly-linked list head; O(n). The node itself is not
+ * freed, and a del not on the list is silently ignored. */
+#define LL_DELETE(head,del) \
+do { \
+ if ((head) == (del)) { \
+ (head)=(head)->next; \
+ } else { \
+ char *_lld_el = (char*)(head); \
+ unsigned _lld_no = FIELD_OFFSET(head,next); \
+ while (LNEXT(_lld_el,_lld_no) && (LNEXT(_lld_el,_lld_no) != (char*)(del))) { \
+ _lld_el = LNEXT(_lld_el,_lld_no); \
+ } \
+ if (LNEXT(_lld_el,_lld_no)) { \
+ LNEXT(_lld_el,_lld_no) = (char*)((del)->next); \
+ } \
+ } \
+} while (0)
+
+/* Iterate el over the list; not safe against deleting el inside the loop. */
+#define LL_FOREACH(head,el) \
+ for(el=head;el;el=el->next)
+
+/******************************************************************************
+ * doubly linked list macros (non-circular) *
+ *****************************************************************************/
+/* Insert add at the front of doubly-linked list head; O(1). Maintains the
+ * invariant (relied on by DL_APPEND) that head->prev points at the tail,
+ * while the tail's next stays NULL. */
+#define DL_PREPEND(head,add) \
+do { \
+ (add)->next = head; \
+ if (head) { \
+ (add)->prev = (head)->prev; \
+ (head)->prev = (add); \
+ } else { \
+ (add)->prev = (add); \
+ } \
+ (head) = (add); \
+} while (0)
+
+/* Append add to doubly-linked list head in O(1): head->prev caches the tail.
+ * BUG FIX: the macro ended in "} while (0);" -- the stray semicolon defeats
+ * the do/while(0) idiom and made "if (c) DL_APPEND(h,a); else ..." a syntax
+ * error. The semicolon is now supplied by the caller, as with LL_APPEND. */
+#define DL_APPEND(head,add) \
+do { \
+ if (head) { \
+ (add)->prev = (head)->prev; \
+ (head)->prev->next = (add); \
+ (head)->prev = (add); \
+ (add)->next = NULL; \
+ } else { \
+ (head)=(add); \
+ (head)->prev = (head); \
+ (head)->next = NULL; \
+ } \
+} while (0)
+
+/* Unlink del from doubly-linked list head in O(1); keeps head->prev caching
+ * the tail. The node is not freed.
+ * BUG FIX: dropped the stray semicolon after "while (0)" so the macro can be
+ * used in an unbraced if/else like every other utlist macro. */
+#define DL_DELETE(head,del) \
+do { \
+ if ((del)->prev == (del)) { \
+ (head)=NULL; \
+ } else if ((del)==(head)) { \
+ (del)->next->prev = (del)->prev; \
+ (head) = (del)->next; \
+ } else { \
+ (del)->prev->next = (del)->next; \
+ if ((del)->next) { \
+ (del)->next->prev = (del)->prev; \
+ } else { \
+ (head)->prev = (del)->prev; \
+ } \
+ } \
+} while (0)
+
+
+/* Iterate el over the list; not safe against deleting el inside the loop. */
+#define DL_FOREACH(head,el) \
+ for(el=head;el;el=el->next)
+
+/******************************************************************************
+ * circular doubly linked list macros *
+ *****************************************************************************/
+/* Insert add into circular doubly-linked list; O(1). add always becomes the
+ * new head (last line), and a singleton list points prev/next at itself. */
+#define CDL_PREPEND(head,add) \
+do { \
+ if (head) { \
+ (add)->prev = (head)->prev; \
+ (add)->next = (head); \
+ (head)->prev = (add); \
+ (add)->prev->next = (add); \
+ } else { \
+ (add)->prev = (add); \
+ (add)->next = (add); \
+ } \
+(head)=(add); \
+} while (0)
+
+/* Unlink del from circular doubly-linked list head in O(1); the head pointer
+ * is advanced (or NULLed for a singleton) when del is the head.
+ * BUG FIX: dropped the stray semicolon after "while (0)" so the macro can be
+ * used in an unbraced if/else like every other utlist macro. */
+#define CDL_DELETE(head,del) \
+do { \
+ if ( ((head)==(del)) && ((head)->next == (head))) { \
+ (head) = 0L; \
+ } else { \
+ (del)->next->prev = (del)->prev; \
+ (del)->prev->next = (del)->next; \
+ if ((del) == (head)) (head)=(del)->next; \
+ } \
+} while (0)
+
+/* Iterate el once around the ring, stopping when it wraps back to head. */
+#define CDL_FOREACH(head,el) \
+ for(el=head;el;el= (el->next==head ? 0L : el->next))
+
+
+#endif /* UTLIST_H */
+
diff --git a/src/rt/util/array_list.h b/src/rt/util/array_list.h
new file mode 100644
index 00000000..0d112575
--- /dev/null
+++ b/src/rt/util/array_list.h
@@ -0,0 +1,69 @@
+#ifndef ARRAY_LIST_H
+#define ARRAY_LIST_H
+
+/**
+ * A simple, resizable array list.
+ */
+template<typename T> class array_list {
+ static const size_t INITIAL_CAPACITY = 8; // starting element capacity
+ size_t _size; // number of elements currently stored
+ T * _data; // malloc/realloc'd backing store
+ size_t _capacity; // allocated element capacity
+public:
+ array_list();
+ ~array_list();
+ size_t size();
+ void append(T value);
+ // NOTE(review): replace() and index_of() return NULL / -1 sentinels, so T
+ // is effectively assumed to be a pointer or integral type -- confirm uses.
+ T replace(T old_value, T new_value);
+ size_t index_of(T value);
+ T & operator[](size_t index);
+};
+
+/* Starts empty with a small malloc'd buffer. */
+template<typename T> array_list<T>::array_list() {
+ _size = 0; // BUG FIX: _size was left uninitialized, so size() and the
+ // append() growth check read an indeterminate value (UB)
+ _capacity = INITIAL_CAPACITY;
+ _data = (T *) malloc(sizeof(T) * _capacity);
+}
+
+template<typename T> array_list<T>::~array_list() {
+ // BUG FIX: _data comes from malloc/realloc (see ctor/append), so it must
+ // be released with free(); using `delete` on malloc'd storage is
+ // undefined behavior.
+ free(_data);
+}
+
+/* Number of elements currently stored. */
+template<typename T> size_t array_list<T>::size() {
+ return _size;
+}
+
+/* Appends value, doubling the backing store when full (amortized O(1)).
+ * NOTE(review): the realloc result is not checked; on allocation failure
+ * _data becomes NULL and the old buffer leaks -- confirm OOM policy. */
+template<typename T> void array_list<T>::append(T value) {
+ if (_size == _capacity) {
+ _capacity = _capacity * 2;
+ _data = (T *) realloc(_data, _capacity * sizeof(T));
+ }
+ _data[_size++] = value;
+}
+
+/**
+ * Replaces the old_value in the list with the new_value.
+ * Returns the old_value if the replacement succeeded, or NULL otherwise.
+ */
+template<typename T> T array_list<T>::replace(T old_value, T new_value) {
+ // BUG FIX: index_of() returns size_t, but the result was stored in an
+ // int and tested with "< 0" -- a truncating, wraparound-dependent way to
+ // spot the not-found sentinel. Compare against (size_t)-1 directly.
+ size_t index = index_of(old_value);
+ if (index == (size_t) -1) {
+ return NULL;
+ }
+ _data[index] = new_value;
+ return old_value;
+}
+
+/* Linear search for value; returns its index, or (size_t)-1 when absent
+ * (the literal -1 wraps around in the unsigned return type). */
+template<typename T> size_t array_list<T>::index_of(T value) {
+ for (size_t i = 0; i < _size; i++) {
+ if (_data[i] == value) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+/* Unchecked element access; index must be < size(). */
+template<typename T> T & array_list<T>::operator[](size_t index) {
+ return _data[index];
+}
+
+#endif /* ARRAY_LIST_H */
diff --git a/src/rt/valgrind.h b/src/rt/valgrind.h
new file mode 100644
index 00000000..530fa184
--- /dev/null
+++ b/src/rt/valgrind.h
@@ -0,0 +1,3926 @@
+/* -*- c -*-
+ ----------------------------------------------------------------
+
+ Notice that the following BSD-style license applies to this one
+ file (valgrind.h) only. The rest of Valgrind is licensed under the
+ terms of the GNU General Public License, version 2, unless
+ otherwise indicated. See the COPYING file in the source
+ distribution for details.
+
+ ----------------------------------------------------------------
+
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2008 Julian Seward. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. The origin of this software must not be misrepresented; you must
+ not claim that you wrote the original software. If you use this
+ software in a product, an acknowledgment in the product
+ documentation would be appreciated but is not required.
+
+ 3. Altered source versions must be plainly marked as such, and must
+ not be misrepresented as being the original software.
+
+ 4. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ----------------------------------------------------------------
+
+ Notice that the above BSD-style license applies to this one file
+ (valgrind.h) only. The entire rest of Valgrind is licensed under
+ the terms of the GNU General Public License, version 2. See the
+ COPYING file in the source distribution for details.
+
+ ----------------------------------------------------------------
+*/
+
+
+/* This file is for inclusion into client (your!) code.
+
+ You can use these macros to manipulate and query Valgrind's
+ execution inside your own programs.
+
+ The resulting executables will still run without Valgrind, just a
+ little bit more slowly than they otherwise would, but otherwise
+ unchanged. When not running on valgrind, each client request
+ consumes very few (eg. 7) instructions, so the resulting performance
+ loss is negligible unless you plan to execute client requests
+ millions of times per second. Nevertheless, if that is still a
+ problem, you can compile with the NVALGRIND symbol defined (gcc
+ -DNVALGRIND) so that client requests are not even compiled in. */
+
+#ifndef __VALGRIND_H
+#define __VALGRIND_H
+
+#include <stdarg.h>
+
+/* Nb: this file might be included in a file compiled with -ansi. So
+ we can't use C++ style "//" comments nor the "asm" keyword (instead
+ use "__asm__"). */
+
+/* Derive some tags indicating what the target platform is. Note
+ that in this file we're using the compiler's CPP symbols for
+ identifying architectures, which are different to the ones we use
+ within the rest of Valgrind. Note, __powerpc__ is active for both
+ 32 and 64-bit PPC, whereas __powerpc64__ is only active for the
+ latter (on Linux, that is). */
+#undef PLAT_x86_linux
+#undef PLAT_amd64_linux
+#undef PLAT_ppc32_linux
+#undef PLAT_ppc64_linux
+#undef PLAT_ppc32_aix5
+#undef PLAT_ppc64_aix5
+
+/* At most one PLAT_* tag is defined below; unrecognized targets define
+   none, which triggers the NVALGRIND fallback that follows. */
+#if !defined(_AIX) && defined(__i386__)
+#  define PLAT_x86_linux 1
+#elif !defined(_AIX) && defined(__x86_64__)
+#  define PLAT_amd64_linux 1
+#elif !defined(_AIX) && defined(__powerpc__) && !defined(__powerpc64__)
+#  define PLAT_ppc32_linux 1
+#elif !defined(_AIX) && defined(__powerpc__) && defined(__powerpc64__)
+#  define PLAT_ppc64_linux 1
+#elif defined(_AIX) && defined(__64BIT__)
+#  define PLAT_ppc64_aix5 1
+#elif defined(_AIX) && !defined(__64BIT__)
+#  define PLAT_ppc32_aix5 1
+#endif
+
+
+/* If we're not compiling for our target platform, don't generate
+   any inline asms.  */
+#if !defined(PLAT_x86_linux) && !defined(PLAT_amd64_linux) \
+    && !defined(PLAT_ppc32_linux) && !defined(PLAT_ppc64_linux) \
+    && !defined(PLAT_ppc32_aix5) && !defined(PLAT_ppc64_aix5)
+#  if !defined(NVALGRIND)
+#    define NVALGRIND 1
+#  endif
+#endif
+
+
+/* ------------------------------------------------------------------ */
+/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */
+/* in here of use to end-users -- skip to the next section. */
+/* ------------------------------------------------------------------ */
+
+#if defined(NVALGRIND)
+
+/* Define NVALGRIND to completely remove the Valgrind magic sequence
+   from the compiled code (analogous to NDEBUG's effects on
+   assert()) */
+/* The request code and args are deliberately ignored; only the default
+   value is assigned, so client code behaves as if not under Valgrind. */
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1,  _zzq_arg2,  _zzq_arg3,  _zzq_arg4, _zzq_arg5) \
+   {                                                              \
+      (_zzq_rlval) = (_zzq_default);                              \
+   }
+
+#else /* ! NVALGRIND */
+
+/* The following defines the magic code sequences which the JITter
+ spots and handles magically. Don't look too closely at them as
+ they will rot your brain.
+
+ The assembly code sequences for all architectures is in this one
+ file. This is because this file must be stand-alone, and we don't
+ want to have multiple files.
+
+ For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
+ value gets put in the return slot, so that everything works when
+ this is executed not under Valgrind. Args are passed in a memory
+ block, and so there's no intrinsic limit to the number that could
+ be passed, but it's currently five.
+
+ The macro args are:
+ _zzq_rlval result lvalue
+ _zzq_default default value (result returned when running on real CPU)
+ _zzq_request request code
+ _zzq_arg1..5 request params
+
+ The other two macros are used to support function wrapping, and are
+ a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the
+ guest's NRADDR pseudo-register and whatever other information is
+ needed to safely run the call original from the wrapper: on
+ ppc64-linux, the R2 value at the divert point is also needed. This
+ information is abstracted into a user-visible type, OrigFn.
+
+ VALGRIND_CALL_NOREDIR_* behaves the same as the following on the
+ guest, but guarantees that the branch instruction will not be
+ redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64:
+ branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a
+ complete inline asm, since it needs to be combined with more magic
+ inline asm stuff to be useful.
+*/
+
+/* ------------------------- x86-linux ------------------------- */
+
+#if defined(PLAT_x86_linux)
+
+/* OrigFn carries the guest NRADDR pseudo-register for function wrapping. */
+typedef
+   struct {
+      unsigned int nraddr; /* where's the code? */
+   }
+   OrigFn;
+
+/* Four rotates by a total of 64 bits leave %edi unchanged: a no-op on a
+   real CPU, but a magic marker recognized by the Valgrind JIT. */
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "roll $3,  %%edi ; roll $13, %%edi\n\t"      \
+                     "roll $29, %%edi ; roll $19, %%edi\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1,  _zzq_arg2,  _zzq_arg3,  _zzq_arg4, _zzq_arg5) \
+  { volatile unsigned int _zzq_args[6];                           \
+    volatile unsigned int _zzq_result;                            \
+    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
+    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
+    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
+    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
+    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
+    _zzq_args[5] = (unsigned int)(_zzq_arg5);                     \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %EDX = client_request ( %EAX ) */         \
+                     "xchgl %%ebx,%%ebx"                          \
+                     : "=d" (_zzq_result)                         \
+                     : "a" (&_zzq_args[0]), "0" (_zzq_default)    \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    volatile unsigned int __addr;                                 \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %EAX = guest_NRADDR */                    \
+                     "xchgl %%ecx,%%ecx"                          \
+                     : "=a" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+  }
+
+#define VALGRIND_CALL_NOREDIR_EAX                                 \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* call-noredir *%EAX */                     \
+                     "xchgl %%edx,%%edx\n\t"
+#endif /* PLAT_x86_linux */
+
+/* ------------------------ amd64-linux ------------------------ */
+
+#if defined(PLAT_amd64_linux)
+
+/* OrigFn carries the guest NRADDR pseudo-register for function wrapping. */
+typedef
+   struct {
+      unsigned long long int nraddr; /* where's the code? */
+   }
+   OrigFn;
+
+/* Four rotates by a total of 128 bits leave %rdi unchanged: a no-op on a
+   real CPU, but a magic marker recognized by the Valgrind JIT. */
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rolq $3,  %%rdi ; rolq $13, %%rdi\n\t"      \
+                     "rolq $61, %%rdi ; rolq $51, %%rdi\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1,  _zzq_arg2,  _zzq_arg3,  _zzq_arg4, _zzq_arg5) \
+  { volatile unsigned long long int _zzq_args[6];                 \
+    volatile unsigned long long int _zzq_result;                  \
+    _zzq_args[0] = (unsigned long long int)(_zzq_request);        \
+    _zzq_args[1] = (unsigned long long int)(_zzq_arg1);           \
+    _zzq_args[2] = (unsigned long long int)(_zzq_arg2);           \
+    _zzq_args[3] = (unsigned long long int)(_zzq_arg3);           \
+    _zzq_args[4] = (unsigned long long int)(_zzq_arg4);           \
+    _zzq_args[5] = (unsigned long long int)(_zzq_arg5);           \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %RDX = client_request ( %RAX ) */         \
+                     "xchgq %%rbx,%%rbx"                          \
+                     : "=d" (_zzq_result)                         \
+                     : "a" (&_zzq_args[0]), "0" (_zzq_default)    \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    volatile unsigned long long int __addr;                       \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %RAX = guest_NRADDR */                    \
+                     "xchgq %%rcx,%%rcx"                          \
+                     : "=a" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+  }
+
+#define VALGRIND_CALL_NOREDIR_RAX                                 \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* call-noredir *%RAX */                     \
+                     "xchgq %%rdx,%%rdx\n\t"
+#endif /* PLAT_amd64_linux */
+
+/* ------------------------ ppc32-linux ------------------------ */
+
+#if defined(PLAT_ppc32_linux)
+
+/* OrigFn carries the guest NRADDR pseudo-register for function wrapping. */
+typedef
+   struct {
+      unsigned int nraddr; /* where's the code? */
+   }
+   OrigFn;
+
+/* Rotate-left of r0 into r0 by a total of 64 bits: architecturally a no-op,
+   but a magic marker recognized by the Valgrind JIT. */
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rlwinm 0,0,3,0,0  ; rlwinm 0,0,13,0,0\n\t"  \
+                     "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1,  _zzq_arg2,  _zzq_arg3,  _zzq_arg4, _zzq_arg5) \
+                                                                  \
+  { unsigned int _zzq_args[6];                                    \
+    unsigned int _zzq_result;                                     \
+    unsigned int* _zzq_ptr;                                       \
+    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
+    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
+    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
+    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
+    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
+    _zzq_args[5] = (unsigned int)(_zzq_arg5);                     \
+    _zzq_ptr = _zzq_args;                                         \
+    __asm__ volatile("mr 3,%1\n\t" /*default*/                    \
+                     "mr 4,%2\n\t" /*ptr*/                        \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = client_request ( %R4 ) */           \
+                     "or 1,1,1\n\t"                               \
+                     "mr %0,3"     /*result*/                     \
+                     : "=b" (_zzq_result)                         \
+                     : "b" (_zzq_default), "b" (_zzq_ptr)         \
+                     : "cc", "memory", "r3", "r4");               \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    unsigned int __addr;                                          \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR */                     \
+                     "or 2,2,2\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory", "r3"                       \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+  }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* branch-and-link-to-noredir *%R11 */       \
+                     "or 3,3,3\n\t"
+#endif /* PLAT_ppc32_linux */
+
+/* ------------------------ ppc64-linux ------------------------ */
+
+#if defined(PLAT_ppc64_linux)
+
+/* OrigFn also records R2 (the TOC pointer) -- needed to safely call the
+   original function from a wrapper on ppc64-linux. */
+typedef
+   struct {
+      unsigned long long int nraddr; /* where's the code? */
+      unsigned long long int r2;  /* what tocptr do we need? */
+   }
+   OrigFn;
+
+/* Rotate-left of r0 into r0 by a total of 128 bits: architecturally a
+   no-op, but a magic marker recognized by the Valgrind JIT. */
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rotldi 0,0,3  ; rotldi 0,0,13\n\t"          \
+                     "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1,  _zzq_arg2,  _zzq_arg3,  _zzq_arg4, _zzq_arg5) \
+                                                                  \
+  { unsigned long long int  _zzq_args[6];                         \
+    register unsigned long long int  _zzq_result __asm__("r3");   \
+    register unsigned long long int* _zzq_ptr __asm__("r4");      \
+    _zzq_args[0] = (unsigned long long int)(_zzq_request);        \
+    _zzq_args[1] = (unsigned long long int)(_zzq_arg1);           \
+    _zzq_args[2] = (unsigned long long int)(_zzq_arg2);           \
+    _zzq_args[3] = (unsigned long long int)(_zzq_arg3);           \
+    _zzq_args[4] = (unsigned long long int)(_zzq_arg4);           \
+    _zzq_args[5] = (unsigned long long int)(_zzq_arg5);           \
+    _zzq_ptr = _zzq_args;                                         \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = client_request ( %R4 ) */           \
+                     "or 1,1,1"                                   \
+                     : "=r" (_zzq_result)                         \
+                     : "0" (_zzq_default), "r" (_zzq_ptr)         \
+                     : "cc", "memory");                           \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    register unsigned long long int __addr __asm__("r3");         \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR */                     \
+                     "or 2,2,2"                                   \
+                     : "=r" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR_GPR2 */                \
+                     "or 4,4,4"                                   \
+                     : "=r" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_orig->r2 = __addr;                                       \
+  }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* branch-and-link-to-noredir *%R11 */       \
+                     "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc64_linux */
+
+/* ------------------------ ppc32-aix5 ------------------------- */
+
+#if defined(PLAT_ppc32_aix5)
+
+/* OrigFn also records R2 (the TOC pointer), as on ppc64-linux. */
+typedef
+   struct {
+      unsigned int nraddr; /* where's the code? */
+      unsigned int r2;  /* what tocptr do we need? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rlwinm 0,0,3,0,0  ; rlwinm 0,0,13,0,0\n\t"  \
+                     "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
+
+/* Unlike the linux variants, the default value travels as a 7th array slot
+   (loaded from 24(r4)) rather than in a register operand. */
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1,  _zzq_arg2,  _zzq_arg3,  _zzq_arg4, _zzq_arg5) \
+                                                                  \
+  { unsigned int _zzq_args[7];                                    \
+    register unsigned int _zzq_result;                            \
+    register unsigned int* _zzq_ptr;                              \
+    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
+    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
+    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
+    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
+    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
+    _zzq_args[5] = (unsigned int)(_zzq_arg5);                     \
+    _zzq_args[6] = (unsigned int)(_zzq_default);                  \
+    _zzq_ptr = _zzq_args;                                         \
+    __asm__ volatile("mr 4,%1\n\t"                                \
+                     "lwz 3, 24(4)\n\t"                           \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = client_request ( %R4 ) */           \
+                     "or 1,1,1\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (_zzq_result)                         \
+                     : "b" (_zzq_ptr)                             \
+                     : "r3", "r4", "cc", "memory");               \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    register unsigned int __addr;                                 \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR */                     \
+                     "or 2,2,2\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "r3", "cc", "memory"                       \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR_GPR2 */                \
+                     "or 4,4,4\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "r3", "cc", "memory"                       \
+                    );                                            \
+    _zzq_orig->r2 = __addr;                                       \
+  }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* branch-and-link-to-noredir *%R11 */       \
+                     "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc32_aix5 */
+
+/* ------------------------ ppc64-aix5 ------------------------- */
+
+#if defined(PLAT_ppc64_aix5)
+
+/* OrigFn also records R2 (the TOC pointer), as on ppc64-linux. */
+typedef
+   struct {
+      unsigned long long int nraddr; /* where's the code? */
+      unsigned long long int r2;  /* what tocptr do we need? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rotldi 0,0,3  ; rotldi 0,0,13\n\t"          \
+                     "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
+
+/* As on ppc32-aix5, the default value travels as a 7th array slot (loaded
+   from 48(r4)).
+   CONSISTENCY FIX: the casts were spelled "(unsigned int long long)" --
+   legal (specifier order is free in C) but unlike every other section in
+   this file; normalized to "(unsigned long long int)". Same type. */
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1,  _zzq_arg2,  _zzq_arg3,  _zzq_arg4, _zzq_arg5) \
+                                                                  \
+  { unsigned long long int _zzq_args[7];                          \
+    register unsigned long long int _zzq_result;                  \
+    register unsigned long long int* _zzq_ptr;                    \
+    _zzq_args[0] = (unsigned long long int)(_zzq_request);        \
+    _zzq_args[1] = (unsigned long long int)(_zzq_arg1);           \
+    _zzq_args[2] = (unsigned long long int)(_zzq_arg2);           \
+    _zzq_args[3] = (unsigned long long int)(_zzq_arg3);           \
+    _zzq_args[4] = (unsigned long long int)(_zzq_arg4);           \
+    _zzq_args[5] = (unsigned long long int)(_zzq_arg5);           \
+    _zzq_args[6] = (unsigned long long int)(_zzq_default);        \
+    _zzq_ptr = _zzq_args;                                         \
+    __asm__ volatile("mr 4,%1\n\t"                                \
+                     "ld 3, 48(4)\n\t"                            \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = client_request ( %R4 ) */           \
+                     "or 1,1,1\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (_zzq_result)                         \
+                     : "b" (_zzq_ptr)                             \
+                     : "r3", "r4", "cc", "memory");               \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    register unsigned long long int __addr;                       \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR */                     \
+                     "or 2,2,2\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "r3", "cc", "memory"                       \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR_GPR2 */                \
+                     "or 4,4,4\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "r3", "cc", "memory"                       \
+                    );                                            \
+    _zzq_orig->r2 = __addr;                                       \
+  }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* branch-and-link-to-noredir *%R11 */       \
+                     "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc64_aix5 */
+
+/* Insert assembly code for other platforms here... */
+
+#endif /* NVALGRIND */
+
+
+/* ------------------------------------------------------------------ */
+/* PLATFORM SPECIFICS for FUNCTION WRAPPING. This is all very */
+/* ugly. It's the least-worst tradeoff I can think of. */
+/* ------------------------------------------------------------------ */
+
+/* This section defines magic (a.k.a appalling-hack) macros for doing
+ guaranteed-no-redirection macros, so as to get from function
+ wrappers to the functions they are wrapping. The whole point is to
+ construct standard call sequences, but to do the call itself with a
+ special no-redirect call pseudo-instruction that the JIT
+ understands and handles specially. This section is long and
+ repetitious, and I can't see a way to make it shorter.
+
+ The naming scheme is as follows:
+
+ CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}
+
+ 'W' stands for "word" and 'v' for "void". Hence there are
+ different macros for calling arity 0, 1, 2, 3, 4, etc, functions,
+ and for each, the possibility of returning a word-typed result, or
+ no result.
+*/
+
+/* Use these to write the name of your wrapper. NOTE: duplicates
+ VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. */
+
+/* Encode (soname, function) into the mangled wrapper symbol name; ZU/ZZ
+   mirror VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h (see note above). */
+#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname)                    \
+   _vgwZU_##soname##_##fnname
+
+#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname)                    \
+   _vgwZZ_##soname##_##fnname
+
+/* Use this macro from within a wrapper function to collect the
+   context (address and possibly other info) of the original function.
+   Once you have that you can then use it in one of the CALL_FN_
+   macros.  The type of the argument _lval is OrigFn. */
+#define VALGRIND_GET_ORIG_FN(_lval)  VALGRIND_GET_NR_CONTEXT(_lval)
+
+/* Derivatives of the main macros below, for calling functions
+ returning void. */
+
+/* void-returning wrappers: call the word-returning CALL_FN_W_* and discard
+   the result through a volatile local. */
+#define CALL_FN_v_v(fnptr)                                        \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_v(_junk,fnptr); } while (0)
+
+#define CALL_FN_v_W(fnptr, arg1)                                  \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_W(_junk,fnptr,arg1); } while (0)
+
+#define CALL_FN_v_WW(fnptr, arg1,arg2)                            \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0)
+
+#define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3)                      \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0)
+
+/* ------------------------- x86-linux ------------------------- */
+
+#if defined(PLAT_x86_linux)
+
+/* These regs are trashed by the hidden call.  No need to mention eax
+   as gcc can already see that, plus causes gcc to bomb. */
+#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"
+
+/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned
+   long) == 4. */
+
+/* Args are pushed right-to-left from the argvec, the no-redirect call is
+   made, then the pushed args are popped with an addl to %esp. */
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[1];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[2];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      __asm__ volatile(                                           \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $4, %%esp\n"                                       \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      __asm__ volatile(                                           \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $8, %%esp\n"                                       \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[4];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      __asm__ volatile(                                           \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $12, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ __asm__ volatile( \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $16, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ __asm__ volatile( \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $20, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ __asm__ volatile( \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $24, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ __asm__ volatile( \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $28, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ __asm__ volatile( \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $32, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ __asm__ volatile( \
+ "pushl 36(%%eax)\n\t" \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $36, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ __asm__ volatile( \
+ "pushl 40(%%eax)\n\t" \
+ "pushl 36(%%eax)\n\t" \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $40, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ __asm__ volatile( \
+ "pushl 44(%%eax)\n\t" \
+ "pushl 40(%%eax)\n\t" \
+ "pushl 36(%%eax)\n\t" \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $44, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
+ __asm__ volatile( \
+ "pushl 48(%%eax)\n\t" \
+ "pushl 44(%%eax)\n\t" \
+ "pushl 40(%%eax)\n\t" \
+ "pushl 36(%%eax)\n\t" \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $48, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_x86_linux */
+
+/* ------------------------ amd64-linux ------------------------ */
+
+#if defined(PLAT_amd64_linux)
+
+/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \
+ "rdi", "r8", "r9", "r10", "r11"
+
+/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
+ long) == 8. */
+
+/* NB 9 Sept 07. There is a nasty kludge here in all these CALL_FN_
+ macros. In order not to trash the stack redzone, we need to drop
+ %rsp by 128 before the hidden call, and restore afterwards. The
+   nastiness is that it is only by luck that the stack still appears
+ to be unwindable during the hidden call - since then the behaviour
+ of any routine using this macro does not match what the CFI data
+ says. Sigh.
+
+ Why is this important? Imagine that a wrapper has a stack
+ allocated local, and passes to the hidden call, a pointer to it.
+ Because gcc does not know about the hidden call, it may allocate
+ that local in the redzone. Unfortunately the hidden call may then
+ trash it before it comes to use it. So we must step clear of the
+ redzone, for the duration of the hidden call, to make it safe.
+
+ Probably the same problem afflicts the other redzone-style ABIs too
+ (ppc64-linux, ppc32-aix5, ppc64-aix5); but for those, the stack is
+ self describing (none of this CFI nonsense) so at least messing
+ with the stack pointer doesn't give a danger of non-unwindable
+ stack. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)  \
+   do {                                                          \
+      volatile OrigFn        _orig = (orig);                     \
+      volatile unsigned long _argvec[7];                         \
+      volatile unsigned long _res;                               \
+      /* _argvec[0] = target address, _argvec[1..6] = the six    \
+         register arguments (rdi,rsi,rdx,rcx,r8,r9). */          \
+      _argvec[0] = (unsigned long)_orig.nraddr;                  \
+      _argvec[1] = (unsigned long)(arg1);                        \
+      _argvec[2] = (unsigned long)(arg2);                        \
+      _argvec[3] = (unsigned long)(arg3);                        \
+      _argvec[4] = (unsigned long)(arg4);                        \
+      _argvec[5] = (unsigned long)(arg5);                        \
+      _argvec[6] = (unsigned long)(arg6);                        \
+      __asm__ volatile(                                          \
+         "subq $128,%%rsp\n\t"   /* step clear of the red zone */\
+         "movq 48(%%rax), %%r9\n\t"                              \
+         "movq 40(%%rax), %%r8\n\t"                              \
+         "movq 32(%%rax), %%rcx\n\t"                             \
+         "movq 24(%%rax), %%rdx\n\t"                             \
+         "movq 16(%%rax), %%rsi\n\t"                             \
+         "movq 8(%%rax), %%rdi\n\t"                              \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */           \
+         /* BUGFIX: the hidden call must be made while %rsp is   \
+            still 128 bytes below the red zone; previously the   \
+            "addq $128" was emitted BEFORE the call, so the      \
+            callee could trash red-zone-allocated locals (see    \
+            the NB comment above).  Order now matches every      \
+            sibling CALL_FN_ macro in this section. */           \
+         VALGRIND_CALL_NOREDIR_RAX                               \
+         "addq $128,%%rsp\n\t"   /* undo the red-zone skip */    \
+         : /*out*/   "=a" (_res)                                 \
+         : /*in*/    "a" (&_argvec[0])                           \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS         \
+      );                                                         \
+      lval = (__typeof__(lval)) _res;                            \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                     arg7)                                       \
+   do {                                                          \
+      volatile OrigFn        _orig = (orig);                     \
+      volatile unsigned long _argvec[8];                         \
+      volatile unsigned long _res;                               \
+      /* Six args go in registers; arg7 is pushed on the stack. */\
+      _argvec[0] = (unsigned long)_orig.nraddr;                  \
+      _argvec[1] = (unsigned long)(arg1);                        \
+      _argvec[2] = (unsigned long)(arg2);                        \
+      _argvec[3] = (unsigned long)(arg3);                        \
+      _argvec[4] = (unsigned long)(arg4);                        \
+      _argvec[5] = (unsigned long)(arg5);                        \
+      _argvec[6] = (unsigned long)(arg6);                        \
+      _argvec[7] = (unsigned long)(arg7);                        \
+      __asm__ volatile(                                          \
+         "subq $128,%%rsp\n\t"   /* step clear of the red zone */\
+         "pushq 56(%%rax)\n\t"   /* arg7 -> stack */             \
+         "movq 48(%%rax), %%r9\n\t"                              \
+         "movq 40(%%rax), %%r8\n\t"                              \
+         "movq 32(%%rax), %%rcx\n\t"                             \
+         "movq 24(%%rax), %%rdx\n\t"                             \
+         "movq 16(%%rax), %%rsi\n\t"                             \
+         "movq 8(%%rax), %%rdi\n\t"                              \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */           \
+         VALGRIND_CALL_NOREDIR_RAX                               \
+         "addq $8, %%rsp\n"      /* pop the stacked arg7 */      \
+         "addq $128,%%rsp\n\t"   /* undo the red-zone skip */    \
+         : /*out*/   "=a" (_res)                                 \
+         : /*in*/    "a" (&_argvec[0])                           \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS         \
+      );                                                         \
+      lval = (__typeof__(lval)) _res;                            \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $16, %%rsp\n" \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $24, %%rsp\n" \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "pushq 80(%%rax)\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $32, %%rsp\n" \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "pushq 88(%%rax)\n\t" \
+ "pushq 80(%%rax)\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $40, %%rsp\n" \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "pushq 96(%%rax)\n\t" \
+ "pushq 88(%%rax)\n\t" \
+ "pushq 80(%%rax)\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $48, %%rsp\n" \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_amd64_linux */
+
+/* ------------------------ ppc32-linux ------------------------ */
+
+#if defined(PLAT_ppc32_linux)
+
+/* This is useful for finding out about the on-stack stuff:
+
+ extern int f9 ( int,int,int,int,int,int,int,int,int );
+ extern int f10 ( int,int,int,int,int,int,int,int,int,int );
+ extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
+ extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );
+
+ int g9 ( void ) {
+ return f9(11,22,33,44,55,66,77,88,99);
+ }
+ int g10 ( void ) {
+ return f10(11,22,33,44,55,66,77,88,99,110);
+ }
+ int g11 ( void ) {
+ return f11(11,22,33,44,55,66,77,88,99,110,121);
+ }
+ int g12 ( void ) {
+ return f12(11,22,33,44,55,66,77,88,99,110,121,132);
+ }
+*/
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+ "lr", "ctr", "xer", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+ "r11", "r12", "r13"
+
+/* These CALL_FN_ macros assume that on ppc32-linux,
+ sizeof(unsigned long) == 4. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "addi 1,1,-16\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,8(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "addi 1,1,16\n\t" \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "addi 1,1,-16\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,12(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,8(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "addi 1,1,16\n\t" \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ _argvec[11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "addi 1,1,-32\n\t" \
+ /* arg11 */ \
+ "lwz 3,44(11)\n\t" \
+ "stw 3,16(1)\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,12(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,8(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "addi 1,1,32\n\t" \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ _argvec[11] = (unsigned long)arg11; \
+ _argvec[12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "addi 1,1,-32\n\t" \
+ /* arg12 */ \
+ "lwz 3,48(11)\n\t" \
+ "stw 3,20(1)\n\t" \
+ /* arg11 */ \
+ "lwz 3,44(11)\n\t" \
+ "stw 3,16(1)\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,12(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,8(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "addi 1,1,32\n\t" \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_ppc32_linux */
+
+/* ------------------------ ppc64-linux ------------------------ */
+
+#if defined(PLAT_ppc64_linux)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+ "lr", "ctr", "xer", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+ "r11", "r12", "r13"
+
+/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
+ long) == 8. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+0]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+1]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+2]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+3]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+4]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+5]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+6]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+7]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+8]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+9]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-128\n\t" /* expand stack frame */ \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ "addi 1,1,128" /* restore frame */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+10]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-128\n\t" /* expand stack frame */ \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ "addi 1,1,128" /* restore frame */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+11]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-144\n\t" /* expand stack frame */ \
+ /* arg11 */ \
+ "ld 3,88(11)\n\t" \
+ "std 3,128(1)\n\t" \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ "addi 1,1,144" /* restore frame */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+12]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ _argvec[2+12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-144\n\t" /* expand stack frame */ \
+ /* arg12 */ \
+ "ld 3,96(11)\n\t" \
+ "std 3,136(1)\n\t" \
+ /* arg11 */ \
+ "ld 3,88(11)\n\t" \
+ "std 3,128(1)\n\t" \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ "addi 1,1,144" /* restore frame */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_ppc64_linux */
+
+/* ------------------------ ppc32-aix5 ------------------------- */
+
+#if defined(PLAT_ppc32_aix5)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+ "lr", "ctr", "xer", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+ "r11", "r12", "r13"
+
+/* Expand the stack frame, copying enough info that unwinding
+ still works. Trashes r3. */
+
+#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr) \
+ "addi 1,1,-" #_n_fr "\n\t" \
+ "lwz 3," #_n_fr "(1)\n\t" \
+ "stw 3,0(1)\n\t"
+
+#define VG_CONTRACT_FRAME_BY(_n_fr) \
+ "addi 1,1," #_n_fr "\n\t"
+
+/* These CALL_FN_ macros assume that on ppc32-aix5, sizeof(unsigned
+ long) == 4. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+0]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+1]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+2]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+3]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+4]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+5]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+6]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+7]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+8]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+9]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(64) \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,56(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(64) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+10]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(64) \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,60(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,56(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(64) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+11]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(72) \
+ /* arg11 */ \
+ "lwz 3,44(11)\n\t" \
+ "stw 3,64(1)\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,60(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,56(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(72) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+12]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ _argvec[2+12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(72) \
+ /* arg12 */ \
+ "lwz 3,48(11)\n\t" \
+ "stw 3,68(1)\n\t" \
+ /* arg11 */ \
+ "lwz 3,44(11)\n\t" \
+ "stw 3,64(1)\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,60(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,56(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(72) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_ppc32_aix5 */
+
+/* ------------------------ ppc64-aix5 ------------------------- */
+
+#if defined(PLAT_ppc64_aix5)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+ "lr", "ctr", "xer", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+ "r11", "r12", "r13"
+
+/* Expand the stack frame, copying enough info that unwinding
+ still works. Trashes r3. */
+
+#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr) \
+ "addi 1,1,-" #_n_fr "\n\t" \
+ "ld 3," #_n_fr "(1)\n\t" \
+ "std 3,0(1)\n\t"
+
+#define VG_CONTRACT_FRAME_BY(_n_fr) \
+ "addi 1,1," #_n_fr "\n\t"
+
+/* These CALL_FN_ macros assume that on ppc64-aix5, sizeof(unsigned
+ long) == 8. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+0]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+1]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+2]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+3]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+4]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+5]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+6]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+7]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+8]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+9]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(128) \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(128) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+10]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(128) \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(128) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+11]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(144) \
+ /* arg11 */ \
+ "ld 3,88(11)\n\t" \
+ "std 3,128(1)\n\t" \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(144) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+12]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ _argvec[2+12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(144) \
+ /* arg12 */ \
+ "ld 3,96(11)\n\t" \
+ "std 3,136(1)\n\t" \
+ /* arg11 */ \
+ "ld 3,88(11)\n\t" \
+ "std 3,128(1)\n\t" \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(144) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_ppc64_aix5 */
+
+
+/* ------------------------------------------------------------------ */
+/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */
+/* */
+/* ------------------------------------------------------------------ */
+
+/* Some request codes. There are many more of these, but most are not
+ exposed to end-user view. These are the public ones, all of the
+ form 0x1000 + small_number.
+
+ Core ones are in the range 0x00000000--0x0000ffff. The non-public
+ ones start at 0x2000.
+*/
+
+/* These macros are used by tools -- they must be public, but don't
+ embed them into other programs. */
+#define VG_USERREQ_TOOL_BASE(a,b) \
+ ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16))
+#define VG_IS_TOOL_USERREQ(a, b, v) \
+ (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000))
+
+/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
+   This enum comprises an ABI exported by Valgrind to programs
+   which use client requests.  DO NOT CHANGE THE ORDER OF THESE
+   ENTRIES, NOR DELETE ANY -- add new ones at the end. */
+/* Numbering scheme visible below: 0x1000 + small number, with the
+   second hex digit grouping related requests (0x11xx = client calls,
+   0x12xx = error counting, 0x13xx = heap/mempool, 0x14xx = printf,
+   0x15xx = stack registration). */
+typedef
+   enum { VG_USERREQ__RUNNING_ON_VALGRIND  = 0x1001,
+          VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,
+
+          /* These allow any function to be called from the simulated
+             CPU but run on the real CPU.  Nb: the first arg passed to
+             the function is always the ThreadId of the running
+             thread!  So CLIENT_CALL0 actually requires a 1 arg
+             function, etc. */
+          VG_USERREQ__CLIENT_CALL0 = 0x1101,
+          VG_USERREQ__CLIENT_CALL1 = 0x1102,
+          VG_USERREQ__CLIENT_CALL2 = 0x1103,
+          VG_USERREQ__CLIENT_CALL3 = 0x1104,
+
+          /* Can be useful in regression testing suites -- eg. can
+             send Valgrind's output to /dev/null and still count
+             errors. */
+          VG_USERREQ__COUNT_ERRORS = 0x1201,
+
+          /* These are useful and can be interpreted by any tool that
+             tracks malloc() et al, by using vg_replace_malloc.c. */
+          VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
+          VG_USERREQ__FREELIKE_BLOCK   = 0x1302,
+          /* Memory pool support. */
+          VG_USERREQ__CREATE_MEMPOOL   = 0x1303,
+          VG_USERREQ__DESTROY_MEMPOOL  = 0x1304,
+          VG_USERREQ__MEMPOOL_ALLOC    = 0x1305,
+          VG_USERREQ__MEMPOOL_FREE     = 0x1306,
+          VG_USERREQ__MEMPOOL_TRIM     = 0x1307,
+          VG_USERREQ__MOVE_MEMPOOL     = 0x1308,
+          VG_USERREQ__MEMPOOL_CHANGE   = 0x1309,
+          VG_USERREQ__MEMPOOL_EXISTS   = 0x130a,
+
+          /* Allow printfs to valgrind log. */
+          VG_USERREQ__PRINTF           = 0x1401,
+          VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
+
+          /* Stack support. */
+          VG_USERREQ__STACK_REGISTER   = 0x1501,
+          VG_USERREQ__STACK_DEREGISTER = 0x1502,
+          VG_USERREQ__STACK_CHANGE     = 0x1503
+   } Vg_ClientRequest;
+
+#if !defined(__GNUC__)
+/* `__extension__' is a GNU keyword (used below to silence pedantic
+   warnings on statement expressions); define it away on non-GCC
+   compilers so the macros below still parse. */
+#  define __extension__ /* */
+#endif
+
+/* Returns the number of Valgrinds this code is running under.  That
+   is, 0 if running natively, 1 if running under Valgrind, 2 if
+   running under Valgrind which is running under another Valgrind,
+   etc. */
+/* GNU statement expression: evaluates to _qzz_res.  The `0' passed to
+   VALGRIND_DO_CLIENT_REQUEST is the result when run natively (the
+   request is then a no-op). */
+#define RUNNING_ON_VALGRIND  __extension__                        \
+   ({unsigned int _qzz_res;                                       \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* if not */,          \
+                               VG_USERREQ__RUNNING_ON_VALGRIND,   \
+                               0, 0, 0, 0, 0);                    \
+    _qzz_res;                                                     \
+   })
+
+
+/* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
+ _qzz_len - 1]. Useful if you are debugging a JITter or some such,
+ since it provides a way to make sure valgrind will retranslate the
+ invalidated area. Returns no value. */
+#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__DISCARD_TRANSLATIONS, \
+ _qzz_addr, _qzz_len, 0, 0, 0); \
+ }
+
+
+/* These requests are for getting Valgrind itself to print something.
+ Possibly with a backtrace. This is a really ugly hack. */
+
+#if defined(NVALGRIND)
+
+/* Compiled-out build: both requests expand to nothing, so (unlike the
+   function versions below) their return value may not be used. */
+# define VALGRIND_PRINTF(...)
+# define VALGRIND_PRINTF_BACKTRACE(...)
+
+#else /* NVALGRIND */
+
+/* Modern GCC will optimize the static routine out if unused,
+   and unused attribute will shut down warnings about it. */
+/* Print a formatted message to the Valgrind log.  Returns the value
+   handed back by the core (0 when run natively, since 0 is the default
+   result supplied to the client request). */
+static int VALGRIND_PRINTF(const char *format, ...)
+   __attribute__((format(__printf__, 1, 2), __unused__));
+static int
+VALGRIND_PRINTF(const char *format, ...)
+{
+   unsigned long _qzz_res;
+   va_list vargs;
+   va_start(vargs, format);
+   /* NOTE(review): the va_list is handed to the core cast to an
+      unsigned long; presumably the VG_USERREQ__PRINTF handler unpacks
+      it -- confirm against the core's implementation. */
+   VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF,
+                              (unsigned long)format, (unsigned long)vargs,
+                              0, 0, 0);
+   va_end(vargs);
+   return (int)_qzz_res;
+}
+
+/* Like VALGRIND_PRINTF, but Valgrind also appends a backtrace of the
+   calling thread.  Returns the value handed back by the core (0 when
+   run natively -- the default result supplied to the request). */
+static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
+   __attribute__((format(__printf__, 1, 2), __unused__));
+static int
+VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
+{
+   unsigned long _qzz_res;
+   va_list vargs;
+   va_start(vargs, format);
+   /* NOTE(review): va_list cast to unsigned long, same caveat as in
+      VALGRIND_PRINTF above. */
+   VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF_BACKTRACE,
+                              (unsigned long)format, (unsigned long)vargs,
+                              0, 0, 0);
+   va_end(vargs);
+   return (int)_qzz_res;
+}
+
+#endif /* NVALGRIND */
+
+
+/* These requests allow control to move from the simulated CPU to the
+ real CPU, calling an arbitary function.
+
+ Note that the current ThreadId is inserted as the first argument.
+ So this call:
+
+ VALGRIND_NON_SIMD_CALL2(f, arg1, arg2)
+
+ requires f to have this signature:
+
+ Word f(Word tid, Word arg1, Word arg2)
+
+ where "Word" is a word-sized type.
+
+ Note that these client requests are not entirely reliable. For example,
+ if you call a function with them that subsequently calls printf(),
+ there's a high chance Valgrind will crash. Generally, your prospects of
+ these working are made higher if the called function does not refer to
+ any global variables, and does not refer to any libc or other functions
+ (printf et al). Any kind of entanglement with libc or dynamic linking is
+ likely to have a bad outcome, for tricky reasons which we've grappled
+ with a lot in the past.
+*/
+/* Run _qyy_fn on the real CPU with no extra args; it still receives
+   the ThreadId as its single argument (see note above).  Evaluates to
+   the function's result; 0 when run natively. */
+#define VALGRIND_NON_SIMD_CALL0(_qyy_fn)                          \
+   __extension__                                                  \
+   ({unsigned long _qyy_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__CLIENT_CALL0,          \
+                               _qyy_fn,                           \
+                               0, 0, 0, 0);                       \
+    _qyy_res;                                                     \
+   })
+
+/* As above, with one extra argument (so _qyy_fn takes two). */
+#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1)               \
+   __extension__                                                  \
+   ({unsigned long _qyy_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__CLIENT_CALL1,          \
+                               _qyy_fn,                           \
+                               _qyy_arg1, 0, 0, 0);               \
+    _qyy_res;                                                     \
+   })
+
+/* As above, with two extra arguments (so _qyy_fn takes three). */
+#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2)    \
+   __extension__                                                  \
+   ({unsigned long _qyy_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__CLIENT_CALL2,          \
+                               _qyy_fn,                           \
+                               _qyy_arg1, _qyy_arg2, 0, 0);       \
+    _qyy_res;                                                     \
+   })
+
+/* As above, with three extra arguments (so _qyy_fn takes four). */
+#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
+   __extension__                                                  \
+   ({unsigned long _qyy_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__CLIENT_CALL3,          \
+                               _qyy_fn,                           \
+                               _qyy_arg1, _qyy_arg2,              \
+                               _qyy_arg3, 0);                     \
+    _qyy_res;                                                     \
+   })
+
+
+/* Counts the number of errors that have been recorded by a tool.  Nb:
+   the tool must record the errors with VG_(maybe_record_error)() or
+   VG_(unique_error)() for them to be counted. */
+/* Statement expression: evaluates to the error count, 0 when run
+   natively (the default result supplied to the request). */
+#define VALGRIND_COUNT_ERRORS                                     \
+   __extension__                                                  \
+   ({unsigned int _qyy_res;                                       \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__COUNT_ERRORS,          \
+                               0, 0, 0, 0, 0);                    \
+    _qyy_res;                                                     \
+   })
+
+/* Mark a block of memory as having been allocated by a malloc()-like
+ function. `addr' is the start of the usable block (ie. after any
+ redzone) `rzB' is redzone size if the allocator can apply redzones;
+ use '0' if not. Adding redzones makes it more likely Valgrind will spot
+ block overruns. `is_zeroed' indicates if the memory is zeroed, as it is
+ for calloc(). Put it immediately after the point where a block is
+ allocated.
+
+ If you're using Memcheck: If you're allocating memory via superblocks,
+ and then handing out small chunks of each superblock, if you don't have
+ redzones on your small blocks, it's worth marking the superblock with
+ VALGRIND_MAKE_MEM_NOACCESS when it's created, so that block overruns are
+ detected. But if you can put redzones on, it's probably better to not do
+ this, so that messages for small overruns are described in terms of the
+ small block rather than the superblock (but if you have a big overrun
+ that skips over a redzone, you could miss an error this way). See
+ memcheck/tests/custom_alloc.c for an example.
+
+ WARNING: if your allocator uses malloc() or 'new' to allocate
+ superblocks, rather than mmap() or brk(), this will not work properly --
+ you'll likely get assertion failures during leak detection. This is
+ because Valgrind doesn't like seeing overlapping heap blocks. Sorry.
+
+ Nb: block must be freed via a free()-like function specified
+ with VALGRIND_FREELIKE_BLOCK or mismatch errors will occur. */
+#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MALLOCLIKE_BLOCK, \
+ addr, sizeB, rzB, is_zeroed, 0); \
+ }
+
+/* Mark a block of memory as having been freed by a free()-like function.
+ `rzB' is redzone size; it must match that given to
+ VALGRIND_MALLOCLIKE_BLOCK. Memory not freed will be detected by the leak
+ checker. Put it immediately after the point where the block is freed. */
+#define VALGRIND_FREELIKE_BLOCK(addr, rzB) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__FREELIKE_BLOCK, \
+ addr, rzB, 0, 0, 0); \
+ }
+
+/* Create a memory pool. */
+#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__CREATE_MEMPOOL, \
+ pool, rzB, is_zeroed, 0, 0); \
+ }
+
+/* Destroy a memory pool. */
+#define VALGRIND_DESTROY_MEMPOOL(pool) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__DESTROY_MEMPOOL, \
+ pool, 0, 0, 0, 0); \
+ }
+
+/* Associate a piece of memory with a memory pool. */
+#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_ALLOC, \
+ pool, addr, size, 0, 0); \
+ }
+
+/* Disassociate a piece of memory from a memory pool. */
+#define VALGRIND_MEMPOOL_FREE(pool, addr) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_FREE, \
+ pool, addr, 0, 0, 0); \
+ }
+
+/* Disassociate any pieces outside a particular range. */
+#define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_TRIM, \
+ pool, addr, size, 0, 0); \
+ }
+
+/* Resize and/or move a piece associated with a memory pool. */
+#define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MOVE_MEMPOOL, \
+ poolA, poolB, 0, 0, 0); \
+ }
+
+/* Resize and/or move a piece associated with a memory pool. */
+#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_CHANGE, \
+ pool, addrA, addrB, size, 0); \
+ }
+
+/* Return 1 if a mempool exists, else 0.  Statement expression; the
+   result is 0 when run natively (the default supplied to the
+   request). */
+#define VALGRIND_MEMPOOL_EXISTS(pool)                             \
+   __extension__                                                  \
+   ({unsigned int _qzz_res;                                       \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MEMPOOL_EXISTS,        \
+                               pool, 0, 0, 0, 0);                 \
+    _qzz_res;                                                     \
+   })
+
+/* Mark a piece of memory as being a stack.  Returns a stack id. */
+/* The returned id is what VALGRIND_STACK_DEREGISTER and
+   VALGRIND_STACK_CHANGE below take.  Statement expression; result is
+   0 when run natively (the default supplied to the request). */
+#define VALGRIND_STACK_REGISTER(start, end)                       \
+   __extension__                                                  \
+   ({unsigned int _qzz_res;                                       \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__STACK_REGISTER,        \
+                               start, end, 0, 0, 0);              \
+    _qzz_res;                                                     \
+   })
+
+/* Unmark the piece of memory associated with a stack id as being a
+ stack. */
+#define VALGRIND_STACK_DEREGISTER(id) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__STACK_DEREGISTER, \
+ id, 0, 0, 0, 0); \
+ }
+
+/* Change the start and end address of the stack id. */
+#define VALGRIND_STACK_CHANGE(id, start, end) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__STACK_CHANGE, \
+ id, start, end, 0, 0); \
+ }
+
+
+#undef PLAT_x86_linux
+#undef PLAT_amd64_linux
+#undef PLAT_ppc32_linux
+#undef PLAT_ppc64_linux
+#undef PLAT_ppc32_aix5
+#undef PLAT_ppc64_aix5
+
+#endif /* __VALGRIND_H */