diff -Nru cln-1.3.1.orig/src/base/cl_low.h cln-1.3.1.hack/src/base/cl_low.h
--- cln-1.3.1.orig/src/base/cl_low.h 2009-05-10 20:32:30 +0000
+++ cln-1.3.1.hack/src/base/cl_low.h 2010-09-09 18:18:01 +0000
@@ -3,6 +3,12 @@
 #ifndef _CL_LOW_H
 #define _CL_LOW_H
 
+extern "C" uint32 cln_mulu32_high;
+extern "C" uint64 cln_mulu64_high;
+extern "C" uint16 cln_divu_16_rest;
+extern "C" uint32 cln_divu_32_rest;
+extern "C" uint64 cln_divu_64_rest;
+
 namespace cln {
 
 // Determines the sign of a 16-bit number.
@@ -202,7 +208,7 @@
 // > arg1, arg2 : zwei 32-Bit-Zahlen
 // < 2^32*hi+lo : eine 64-Bit-Zahl
 extern "C" uint32 mulu32_ (uint32 arg1, uint32 arg2); // -> Low-Teil
- extern "C" uint32 mulu32_high; // -> High-Teil
+ //extern "C" uint32 mulu32_high; // -> High-Teil
 #if defined(__GNUC__) && defined(__m68k__) && !defined(NO_ASM)
 #define mulu32(x,y,hi_zuweisung,lo_zuweisung) \
 ({ var uint32 _x = (x); \
@@ -294,7 +300,7 @@
 #pragma aux mulu32_high_ = /* */ value [edx] modify [];
 #else
 #define mulu32(x,y,hi_zuweisung,lo_zuweisung) \
- { lo_zuweisung mulu32_(x,y); unused (hi_zuweisung mulu32_high); }
+ { lo_zuweisung mulu32_(x,y); unused (hi_zuweisung cln_mulu32_high); }
 #if (defined(__m68k__) || defined(__sparc__) || defined(__sparc64__) || defined(__arm__) || (defined(__i386__) && !defined(WATCOM) && !defined(MICROSOFT)) || defined(__x86_64__) || defined(__mips__) || defined(__hppa__)) && !defined(NO_ASM)
 // mulu32_ extern in Assembler
 #if defined(__sparc__) || defined(__sparc64__)
@@ -336,7 +342,7 @@
 // > arg1, arg2 : zwei 64-Bit-Zahlen
 // < 2^64*hi+lo : eine 128-Bit-Zahl
 extern "C" uint64 mulu64_ (uint64 arg1, uint64 arg2); // -> Low-Teil
- extern "C" uint64 mulu64_high; // -> High-Teil
+ //extern "C" uint64 mulu64_high; // -> High-Teil
 #if defined(__GNUC__) && defined(__alpha__) && !defined(NO_ASM)
 #define mulu64(x,y,hi_zuweisung,lo_zuweisung) \
 ({ var register uint64 _x = (x); \
@@ -383,7 +389,7 @@
 })
 #else
 #define mulu64(x,y,hi_zuweisung,lo_zuweisung) \
- { lo_zuweisung mulu64_(x,y); hi_zuweisung mulu64_high; }
+ { lo_zuweisung mulu64_(x,y); hi_zuweisung cln_mulu64_high; }
 #if defined(__sparc64__) && !defined(NO_ASM)
 // mulu64_ extern in Assembler
 extern "C" uint64 _get_g2 (void);
@@ -424,7 +430,7 @@
 extern "C" uint32 divu_3216_1616_ (uint32 x, uint16 y); // -> Quotient q, Rest r
 #else
 extern "C" uint16 divu_3216_1616_ (uint32 x, uint16 y); // -> Quotient q
- extern "C" uint16 divu_16_rest; // -> Rest r
+ //extern "C" uint16 divu_16_rest; // -> Rest r
 #endif
 #if defined(__GNUC__) && defined(__sparc64__) && !defined(NO_ASM)
 #define divu_3216_1616(x,y,q_zuweisung,r_zuweisung) \
@@ -495,12 +501,12 @@
 #elif defined(__arm__) && !defined(NO_ASM)
 #define divu_3216_1616(x,y,q_zuweisung,r_zuweisung) \
 { q_zuweisung divu_3216_1616_(x,y); /* extern in Assembler */ \
- r_zuweisung divu_16_rest; \
+ r_zuweisung cln_divu_16_rest; \
 }
 #define NEED_VAR_divu_16_rest
 #else
 #define divu_3216_1616(x,y,q_zuweisung,r_zuweisung) \
- { q_zuweisung divu_3216_1616_(x,y); r_zuweisung divu_16_rest; }
+ { q_zuweisung divu_3216_1616_(x,y); r_zuweisung cln_divu_16_rest; }
 #define NEED_FUNCTION_divu_3216_1616_
 #endif
 
@@ -514,7 +520,7 @@
 // < uint16 r: x mod y
 // < x = q*y+r
 extern "C" uint32 divu_3216_3216_ (uint32 x, uint16 y); // -> Quotient q
- extern "C" uint16 divu_16_rest; // -> Rest r
+ //extern "C" uint16 divu_16_rest; // -> Rest r
 #if defined(__GNUC__) && defined(__sparc64__) && !defined(NO_ASM)
 #define divu_3216_3216(x,y,q_zuweisung,r_zuweisung) \
 ({var uint32 __x = (x); \
@@ -564,7 +570,7 @@
 // < uint32 r: x mod y
 // < x = q*y+r
 extern "C" uint32 divu_3232_3232_ (uint32 x, uint32 y); // -> Quotient q
- extern "C" uint32 divu_32_rest; // -> Rest r
+ //extern "C" uint32 divu_32_rest; // -> Rest r
 #if defined(__GNUC__) && defined(__sparc64__) && !defined(NO_ASM)
 #define divu_3232_3232(x,y,q_zuweisung,r_zuweisung) \
 ({var uint32 __x = (x); \
@@ -661,7 +667,7 @@
 // < uint32 r: x mod y
 // < x = q*y+r
 extern "C" uint32 divu_6432_3232_ (uint32 xhi, uint32 xlo, uint32 y); // -> Quotient q
- extern "C" uint32 divu_32_rest; // -> Rest r
+ //extern "C" uint32 divu_32_rest; // -> Rest r
 #if defined(__GNUC__) && defined(__m68k__) && !defined(NO_ASM)
 #define divu_6432_3232(xhi,xlo,y,q_zuweisung,r_zuweisung) \
 ({var uint32 __xhi = (xhi); \
@@ -750,7 +756,7 @@
 #pragma aux divu_6432_3232_rest = /* */ value [edx] modify [];
 #else
 #define divu_6432_3232(xhi,xlo,y,q_zuweisung,r_zuweisung) \
- { q_zuweisung divu_6432_3232_(xhi,xlo,y); r_zuweisung divu_32_rest; }
+ { q_zuweisung divu_6432_3232_(xhi,xlo,y); r_zuweisung cln_divu_32_rest; }
 #if (defined(__m68k__) || defined(__sparc__) || defined(__sparc64__) || defined(__arm__) || (defined(__i386__) && !defined(WATCOM) && !defined(MICROSOFT)) || defined(__x86_64__) || defined(__hppa__)) && !defined(NO_ASM)
 // divu_6432_3232_ extern in Assembler
 #if defined(__sparc__) || defined(__sparc64__)
@@ -944,9 +950,9 @@
 // multiplication is slower or faster than our own divu_6464_6464_ routine.
 // Anyway, call our own routine.
 extern "C" uint64 divu_6464_6464_ (uint64 x, uint64 y); // -> Quotient q
- extern "C" uint64 divu_64_rest; // -> Rest r
+ //extern "C" uint64 divu_64_rest; // -> Rest r
 #define divu_6464_6464(x,y,q_zuweisung,r_zuweisung) \
- { q_zuweisung divu_6464_6464_(x,y); r_zuweisung divu_64_rest; }
+ { q_zuweisung divu_6464_6464_(x,y); r_zuweisung cln_divu_64_rest; }
 #define NEED_VAR_divu_64_rest
 #define NEED_FUNCTION_divu_6464_6464_
 #endif
@@ -961,7 +967,7 @@
 // < uint64 r: x mod y
 // < x = q*y+r
 extern "C" uint64 divu_12864_6464_ (uint64 xhi, uint64 xlo, uint64 y); // -> Quotient q
- extern "C" uint64 divu_64_rest; // -> Rest r
+ //extern "C" uint64 divu_64_rest; // -> Rest r
 #if defined(__GNUC__) && defined(__x86_64__) && !defined(NO_ASM)
 #define divu_12864_6464(xhi,xlo,y,q_zuweisung,r_zuweisung) \
 ({var uint64 __xhi = (xhi); \
@@ -981,7 +987,7 @@
 ({var uint64 ___q; divu_12864_6464(xhi,xlo,y,___q=,); ___q; })
 #else
 #define divu_12864_6464(xhi,xlo,y,q_zuweisung,r_zuweisung) \
- { q_zuweisung divu_12864_6464_(xhi,xlo,y); r_zuweisung divu_64_rest; }
+ { q_zuweisung divu_12864_6464_(xhi,xlo,y); r_zuweisung cln_divu_64_rest; }
 #define NEED_VAR_divu_64_rest
 #define NEED_FUNCTION_divu_12864_6464_
 #endif
diff -Nru cln-1.3.1.orig/src/base/low/cl_low_div.cc cln-1.3.1.hack/src/base/low/cl_low_div.cc
--- cln-1.3.1.orig/src/base/low/cl_low_div.cc 2009-05-10 20:32:30 +0000
+++ cln-1.3.1.hack/src/base/low/cl_low_div.cc 2010-09-09 18:18:18 +0000
@@ -10,18 +10,18 @@
 // Implementation.
 
 #ifdef NEED_VAR_divu_16_rest
-uint16 divu_16_rest;
+uint16 cln_divu_16_rest;
 #endif
 
 #ifdef NEED_FUNCTION_divu_3216_1616_
-uint16 divu_16_rest;
+uint16 cln_divu_16_rest;
 namespace cln {
 #if 1
 // Most processors have a good 32 by 32 bit division, use that.
 uint16 divu_3216_1616_ (uint32 x, uint16 y)
 {
 var uint16 q = floor(x,(uint32)y);
- divu_16_rest = x - (uint32)q * (uint32)y;
+ cln_divu_16_rest = x - (uint32)q * (uint32)y;
 return q;
 }
 #else
@@ -101,11 +101,11 @@
 #endif
 
 #ifdef NEED_VAR_divu_32_rest
-uint32 divu_32_rest;
+uint32 cln_divu_32_rest;
 #endif
 
 #ifdef NEED_FUNCTION_divu_6432_3232_
-uint32 divu_32_rest;
+uint32 cln_divu_32_rest;
 namespace cln {
 uint32 divu_6432_3232_(uint32 xhi, uint32 xlo, uint32 y)
 // Methode:
@@ -118,7 +118,7 @@
 var uint16 q0;
 var uint16 r1;
 divu_3216_1616(highlow32(low16(xhi),high16(xlo)),y, q1=,r1=);
- divu_3216_1616(highlow32(r1,low16(xlo)),y, q0=,divu_32_rest=);
+ divu_3216_1616(highlow32(r1,low16(xlo)),y, q0=,cln_divu_32_rest=);
 return highlow32(q1,q0);
 }
 // y>=2^16
@@ -200,14 +200,14 @@
 if (r >= y)
 { q0 += 1; r -= y; }
 }// Quotient q0, Rest r fertig.
- divu_32_rest = r >> s; // Rest
+ cln_divu_32_rest = r >> s; // Rest
 return highlow32(q1,q0); // Quotient
 } } }
 } // namespace cln
 #endif
 
 #ifdef NEED_VAR_divu_64_rest
-uint64 divu_64_rest;
+uint64 cln_divu_64_rest;
 #endif
 
 #ifdef NEED_FUNCTION_divu_6464_6464_
@@ -242,7 +242,7 @@
 var uint32 q0;
 var uint32 r1;
 divu_6432_3232(0,high32(x),y, q1 = , r1 = );
- divu_6432_3232(r1,low32(x),y, q0 = , divu_64_rest = );
+ divu_6432_3232(r1,low32(x),y, q0 = , cln_divu_64_rest = );
 return highlow64(q1,q0);
 }
 else
@@ -269,7 +269,7 @@
 if (x >= y)
 { q += 1; x -= y; }
 }
- divu_64_rest = x;
+ cln_divu_64_rest = x;
 return (uint64)q;
 }
 }
@@ -289,7 +289,7 @@
 var uint32 q0;
 var uint32 r1;
 divu_6432_3232(low32(xhi),high32(xlo),y, q1=,r1=);
- divu_6432_3232(r1,low32(xlo),y, q0=,divu_64_rest=);
+ divu_6432_3232(r1,low32(xlo),y, q0=,cln_divu_64_rest=);
 return highlow64(q1,q0);
 }
 // y>=2^32
@@ -371,7 +371,7 @@
 if (r >= y)
 { q0 += 1; r -= y; }
 }// Quotient q0, Rest r fertig.
- divu_64_rest = r >> s; // Rest
+ cln_divu_64_rest = r >> s; // Rest
 return highlow64(q1,q0); // Quotient
 } } } }
 } // namespace cln
diff -Nru cln-1.3.1.orig/src/base/low/cl_low_mul.cc cln-1.3.1.hack/src/base/low/cl_low_mul.cc
--- cln-1.3.1.orig/src/base/low/cl_low_mul.cc 2009-05-10 20:32:30 +0000
+++ cln-1.3.1.hack/src/base/low/cl_low_mul.cc 2010-09-09 18:23:41 +0000
@@ -10,11 +10,11 @@
 // Implementation.
 
 #ifdef NEED_VAR_mulu32_high
-uint32 mulu32_high;
+uint32 cln_mulu32_high;
 #endif
 
 #ifdef NEED_FUNCTION_mulu32_
-uint32 mulu32_high;
+uint32 cln_mulu32_high;
 namespace cln {
 uint32 mulu32_ (uint32 x, uint32 y)
 {
@@ -32,7 +32,7 @@
 hi += high16(mid); mid = highlow32_0(low16(mid));
 lo += mid; if (lo < mid) { hi += 1; } // 64-Bit-Addition
 }
- mulu32_high = hi; return lo;
+ cln_mulu32_high = hi; return lo;
 }
 } // namespace cln
 #endif
@@ -42,7 +42,7 @@
 uint64 mulu32_w (uint32 arg1, uint32 arg2)
 {
 var uint32 lo = mulu32_(arg1,arg2);
- var uint32 hi = mulu32_high;
+ var uint32 hi = cln_mulu32_high;
 return highlow64(hi,lo);
 }
 } // namespace cln
@@ -50,11 +50,11 @@
 #endif
 
 #ifdef NEED_VAR_mulu64_high
-uint64 mulu64_high;
+uint64 cln_mulu64_high;
 #endif
 
 #ifdef NEED_FUNCTION_mulu64_
-uint64 mulu64_high;
+uint64 cln_mulu64_high;
 namespace cln {
 extern "C" uint64 mulu64_ (uint64 x, uint64 y);
 uint64 mulu64_ (uint64 x, uint64 y)
@@ -73,8 +73,7 @@
 hi += high32(mid); mid = highlow64_0(low32(mid));
 lo += mid; if (lo < mid) { hi += 1; } // 128-Bit-Addition
 }
- mulu64_high = hi; return lo;
+ cln_mulu64_high = hi; return lo;
 }
 } // namespace cln
 #endif
-
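
The sketches that follow are illustration only, not part of the patch. First, the macro interface the header hunks touch: the fallback macros take assignment targets as arguments (in the German comments, "Zuweisung" means assignment, "Rest" is the remainder, and "High-Teil"/"Low-Teil" are the high and low parts). The caller passes "hi=" and "lo=", and the macro pastes them in front of the expressions that produce the two halves, which is why the renamed globals appear directly in the expansions. A minimal standalone sketch of that idiom, with _demo names standing in for mulu32_ and cln_mulu32_high (assumed names, not CLN's actual code):

#include <cstdint>
#include <cassert>

static uint32_t cln_mulu32_high_demo;               // stand-in for cln_mulu32_high

static uint32_t mulu32_demo(uint32_t x, uint32_t y) // stand-in for mulu32_
{
    uint64_t p = (uint64_t)x * y;                   // native 64-bit product for brevity
    cln_mulu32_high_demo = (uint32_t)(p >> 32);     // high part goes into the global
    return (uint32_t)p;                             // low part is the return value
}

// Same shape as the generic fallback in cl_low.h:
//   { lo_zuweisung mulu32_(x,y); hi_zuweisung cln_mulu32_high; }
#define mulu32_demo_macro(x, y, hi_zuweisung, lo_zuweisung) \
    { lo_zuweisung mulu32_demo(x, y); hi_zuweisung cln_mulu32_high_demo; }

int main()
{
    uint32_t hi, lo;
    mulu32_demo_macro(0x12345678u, 0x9ABCDEF0u, hi =, lo =); // "hi =", "lo =" fill the Zuweisung slots
    assert((((uint64_t)hi << 32) | lo) == (uint64_t)0x12345678u * 0x9ABCDEF0u);
    return 0;
}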
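
The mulu32_ fallback in cl_low_mul.cc builds the 64-bit product from 16-bit halves and folds each cross product into the high and low words with an explicit carry check ("64-Bit-Addition" in the comment). A self-contained sketch of the same scheme in portable C++, with a _sketch-suffixed global standing in for cln_mulu32_high (assumed names, simplified layout):

#include <cstdint>
#include <cassert>

static uint32_t cln_mulu32_high_sketch;          // plays the role of cln_mulu32_high

static uint32_t mulu32_sketch(uint32_t x, uint32_t y)
{
    uint32_t xl = x & 0xFFFFu, xh = x >> 16;     // split both factors into 16-bit halves
    uint32_t yl = y & 0xFFFFu, yh = y >> 16;
    uint32_t lo = xl * yl;                       // contributes to bits 0..31
    uint32_t hi = xh * yh;                       // contributes to bits 32..63
    uint32_t mid = xh * yl;                      // first cross product, weight 2^16
    hi += mid >> 16;                             // its upper half lands in hi
    mid <<= 16;                                  // its lower half, shifted to weight 2^16
    lo += mid; if (lo < mid) hi += 1;            // 64-bit addition with carry check
    mid = xl * yh;                               // second cross product, same folding
    hi += mid >> 16;
    mid <<= 16;
    lo += mid; if (lo < mid) hi += 1;
    cln_mulu32_high_sketch = hi;                 // high word via the global, as in CLN
    return lo;                                   // low word as the return value
}

int main()
{
    uint32_t lo = mulu32_sketch(0xDEADBEEFu, 0xCAFEBABEu);
    uint64_t full = ((uint64_t)cln_mulu32_high_sketch << 32) | lo;
    assert(full == (uint64_t)0xDEADBEEFu * 0xCAFEBABEu);
    return 0;
}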
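
The divu_* routines in cl_low_div.cc reduce a wide division to two narrower ones: the high digit is divided first and its remainder is carried in on top of the low digit, as in divu_6432_3232(0,high32(x),y, q1=, r1=) followed by divu_6432_3232(r1,low32(x),y, q0=, cln_divu_64_rest=). A standalone sketch of that reduction for a 64-by-32 division, using native 64-bit division as the digit step rather than CLN's hand-rolled primitive (the _sketch names are placeholders):

#include <cstdint>
#include <cassert>

static uint64_t cln_divu_64_rest_sketch;   // plays the role of cln_divu_64_rest

// One digit step: divide the 64-bit value (rest:digit) by y, with rest < y,
// so the quotient is guaranteed to fit in 32 bits. Native 64/32 division
// stands in here for CLN's narrower division primitive.
static uint32_t div_step(uint32_t rest, uint32_t digit, uint32_t y, uint32_t* rest_out)
{
    uint64_t t = ((uint64_t)rest << 32) | digit;
    *rest_out = (uint32_t)(t % y);
    return (uint32_t)(t / y);
}

// 64-by-32 short division in two digit steps (y must be nonzero).
static uint64_t divu_64_32_sketch(uint64_t x, uint32_t y)
{
    uint32_t r1, r0;
    uint32_t q1 = div_step(0, (uint32_t)(x >> 32), y, &r1);  // high digit first
    uint32_t q0 = div_step(r1, (uint32_t)x, y, &r0);         // carry r1 into the low digit
    cln_divu_64_rest_sketch = r0;                            // remainder via the global
    return ((uint64_t)q1 << 32) | q0;                        // quotient = q1:q0
}

int main()
{
    uint64_t x = 0x123456789ABCDEF0ull;
    uint32_t y = 0xDEADBEEFu;
    uint64_t q = divu_64_32_sketch(x, y);
    assert(q == x / y && cln_divu_64_rest_sketch == x % y);
    return 0;
}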