/**
 * Library for efficient integer division by a single divider
 *
 * When repeatedly dividing (with integer arithmetics) by the same divider, there are certain tricks that allow
 * for quicker operation than the CPU's divide command. This is paid for by higher computation work during the setup stage.
 *
 * For compile time known values, the compiler already performs this trick. This module is meant for run-time known values
 * that are used repeatedly.
 *
 * This code is a D adaptation of libdivide ($(LINK http://libdivide.com)).
 */

/*
  Copyright (C) 2010 ridiculous_fish libdivide@ridiculousfish.com
  Copyright (C) 2017 Weka.IO

  Notice that though the original libdivide is available under either the zlib license or Boost, the D adaptation here
  is only available under the Boost license.

  Please see the AUTHORS file for full copyright and license information.
*/
module mecca.lib.division;

import std.stdint;

/**
 * Signed 32 bit divisor
 *
 * Simply use on right side of division operation
 */
struct S32Divisor {
    ///
    unittest {
        assert (1000 / S32Divisor(50) == 20);
        // Can be used with CTFE
        static assert (1000 / S32Divisor(50) == 20);
    }

    alias Type = typeof(magic);
    int32_t magic;  // magic multiplier; 0 means the pure-shift path is used
    uint8_t more;   // shift amount plus algorithm flag bits (see the LIBDIVIDE_* enum)

    this(int32_t d) {
        assert (d > 0, "d<=0");

        // If d is a power of 2, or negative a power of 2, we have to use a shift. This is especially
        // important because the magic algorithm fails for -1. To check if d is a power of 2 or its inverse,
        // it suffices to check whether its absolute value has exactly one bit set. This works even for INT_MIN,
        // because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set and is a power of 2.
        uint32_t absD = cast(uint32_t)(d < 0 ? -d : d); //gcc optimizes this to the fast abs trick
        if ((absD & (absD - 1)) == 0) { //check if exactly one bit is set, don't care if absD is 0 since that's divide by zero
            this.magic = 0;
            this.more = cast(uint8_t)(libdivide__count_trailing_zeros32(absD) | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0) | LIBDIVIDE_S32_SHIFT_PATH);
        }
        else {
            const uint32_t floor_log_2_d = cast(uint8_t)(31 - libdivide__count_leading_zeros32(absD));
            assert(floor_log_2_d >= 1);

            uint8_t more;
            //the dividend here is 2**(floor_log_2_d + 31), so the low 32 bit word is 0 and the high word is floor_log_2_d - 1
            uint32_t rem, proposed_m;
            proposed_m = libdivide_64_div_32_to_32(1U << (floor_log_2_d - 1), 0, absD, &rem);
            const uint32_t e = absD - rem;

            /* We are going to start with a power of floor_log_2_d - 1. This works if e < 2**floor_log_2_d. */
            if (e < (1U << floor_log_2_d)) {
                /* This power works */
                more = cast(uint8_t)(floor_log_2_d - 1);
            }
            else {
                // We need to go one higher. This should not make proposed_m overflow, but it will make it negative
                // when interpreted as an int32_t.
                proposed_m += proposed_m;
                const uint32_t twice_rem = rem + rem;
                if (twice_rem >= absD || twice_rem < rem) proposed_m += 1;
                more = cast(uint8_t)(floor_log_2_d | LIBDIVIDE_ADD_MARKER | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0)); //use the general algorithm
            }
            proposed_m += 1;
            this.magic = (d < 0 ? -cast(int32_t)proposed_m : cast(int32_t)proposed_m);
            this.more = more;
        }
    }

    ref auto opAssign(int32_t d) {
        this.__ctor(d);
        return this;
    }

    /// Performs `dividend / divisor` using the precomputed magic/shift values.
    int32_t opBinaryRight(string op: "/")(int32_t dividend) const pure nothrow @safe @nogc {
        if (more & LIBDIVIDE_S32_SHIFT_PATH) {
            uint8_t shifter = more & LIBDIVIDE_32_SHIFT_MASK;
            int32_t q = dividend + ((dividend >> 31) & ((1 << shifter) - 1));
            q = q >> shifter;
            int32_t shiftMask = cast(int8_t)(more >> 7); //must be arithmetic shift and then sign-extend
            q = (q ^ shiftMask) - shiftMask;
            return q;
        }
        else {
            int32_t q = libdivide__mullhi_s32(magic, dividend);
            if (more & LIBDIVIDE_ADD_MARKER) {
                int32_t sign = cast(int8_t)(more >> 7); //must be arithmetic shift and then sign extend
                q += ((dividend ^ sign) - sign);
            }
            q >>= more & LIBDIVIDE_32_SHIFT_MASK;
            q += (q < 0);
            return q;
        }
    }
}

/**
 * Unsigned 32 bit divisor
 *
 * Simply use on right side of division operation
 */
struct U32Divisor {
    ///
    unittest {
        assert (1000 / U32Divisor(31) == 32);
        // Can be used with CTFE
        static assert (1000 / U32Divisor(31) == 32);
    }

    alias Type = typeof(magic);
    uint32_t magic; // magic multiplier
    uint8_t more;   // shift amount plus algorithm flag bits (see the LIBDIVIDE_* enum)

    this(uint32_t d) {
        assert (d > 0, "d==0");
        if ((d & (d - 1)) == 0) {
            this.magic = 0;
            this.more = cast(uint8_t)(libdivide__count_trailing_zeros32(d) | LIBDIVIDE_U32_SHIFT_PATH);
        }
        else {
            const uint32_t floor_log_2_d = 31 - libdivide__count_leading_zeros32(d);

            uint8_t more;
            uint32_t rem, proposed_m;
            proposed_m = libdivide_64_div_32_to_32(1U << floor_log_2_d, 0, d, &rem);

            assert(rem > 0 && rem < d);
            const uint32_t e = d - rem;

            /* This power works if e < 2**floor_log_2_d. */
            if (e < (1U << floor_log_2_d)) {
                /* This power works */
                more = cast(uint8_t)floor_log_2_d;
            }
            else {
                // We have to use the general 33-bit algorithm. We need to compute (2**power) / d.
                // However, we already have (2**(power-1))/d and its remainder. By doubling both, and then
                // correcting the remainder, we can compute the larger division.
                proposed_m += proposed_m; //don't care about overflow here - in fact, we expect it
                const uint32_t twice_rem = rem + rem;
                if (twice_rem >= d || twice_rem < rem) proposed_m += 1;
                more = cast(uint8_t)(floor_log_2_d | LIBDIVIDE_ADD_MARKER);
            }
            this.magic = 1 + proposed_m;
            this.more = more;
            //this.more's shift should in general be ceil_log_2_d. But if we used the smaller power, we
            //subtract one from the shift because we're using the smaller power. If we're using the larger power,
            //we subtract one from the shift because it's taken care of by the add indicator.
            //So floor_log_2_d happens to be correct in both cases.
        }
    }

    ref auto opAssign(uint32_t d) {
        this.__ctor(d);
        return this;
    }

    /// Performs `dividend / divisor` using the precomputed magic/shift values.
    uint32_t opBinaryRight(string op: "/")(uint32_t dividend) const pure nothrow @safe @nogc {
        if (more & LIBDIVIDE_U32_SHIFT_PATH) {
            return dividend >> (more & LIBDIVIDE_32_SHIFT_MASK);
        }
        else {
            uint32_t q = libdivide__mullhi_u32(magic, dividend);
            if (more & LIBDIVIDE_ADD_MARKER) {
                uint32_t t = ((dividend - q) >> 1) + q;
                return t >> (more & LIBDIVIDE_32_SHIFT_MASK);
            }
            else {
                return q >> more; //all upper bits are 0 - don't need to mask them off
            }
        }
    }
}

/**
 * Signed 64 bit divisor
 *
 * Simply use on right side of division operation
 */
struct S64Divisor {
    ///
    unittest {
        assert (1000 / S64Divisor(81) == 12);
        // Can be used with CTFE
        static assert (1000 / S64Divisor(81) == 12);
    }

    alias Type = typeof(magic);
    int64_t magic;  // magic multiplier; 0 means the pure-shift path is used
    uint8_t more;   // shift amount plus algorithm flag bits (see the LIBDIVIDE_* enum)

    this(int64_t d) nothrow @trusted @nogc {
        assert (d > 0, "d<=0");
        // If d is a power of 2, or negative a power of 2, we have to use a shift. This is especially important
        // because the magic algorithm fails for -1. To check if d is a power of 2 or its inverse, it suffices
        // to check whether its absolute value has exactly one bit set. This works even for INT_MIN,
        // because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set and is a power of 2.
        const uint64_t absD = cast(uint64_t)(d < 0 ? -d : d); //gcc optimizes this to the fast abs trick
        if ((absD & (absD - 1)) == 0) { //check if exactly one bit is set, don't care if absD is 0 since that's divide by zero
            this.more = cast(ubyte)(libdivide__count_trailing_zeros64(absD) | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0));
            this.magic = 0;
        }
        else {
            const uint32_t floor_log_2_d = cast(uint32_t)(63 - libdivide__count_leading_zeros64(absD));

            //the dividend here is 2**(floor_log_2_d + 63), so the low 64 bit word is 0 and the high word is floor_log_2_d - 1
            uint8_t more;
            uint64_t rem, proposed_m;
            proposed_m = libdivide_128_div_64_to_64(1UL << (floor_log_2_d - 1), 0, absD, &rem); // XXX This line is not @safe
            const uint64_t e = absD - rem;

            /* We are going to start with a power of floor_log_2_d - 1. This works if e < 2**floor_log_2_d. */
            if (e < (1UL << floor_log_2_d)) {
                /* This power works */
                more = cast(ubyte)(floor_log_2_d - 1);
            }
            else {
                // We need to go one higher. This should not make proposed_m overflow, but it will make it
                // negative when interpreted as an int32_t.
                proposed_m += proposed_m;
                const uint64_t twice_rem = rem + rem;
                if (twice_rem >= absD || twice_rem < rem) proposed_m += 1;
                more = cast(ubyte)(floor_log_2_d | LIBDIVIDE_ADD_MARKER | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0));
            }
            proposed_m += 1;
            this.more = more;
            this.magic = (d < 0 ? -cast(int64_t)proposed_m : cast(int64_t)proposed_m);
        }
    }

    ref auto opAssign(int64_t d) {
        this.__ctor(d);
        return this;
    }

    /// Performs `dividend / divisor` using the precomputed magic/shift values.
    int64_t opBinaryRight(string op: "/")(int64_t dividend) const pure nothrow @safe @nogc {
        if (magic == 0) { //shift path
            uint32_t shifter = more & LIBDIVIDE_64_SHIFT_MASK;
            int64_t q = dividend + ((dividend >> 63) & ((1L << shifter) - 1));
            q = q >> shifter;
            int64_t shiftMask = cast(int8_t)(more >> 7); //must be arithmetic shift and then sign-extend
            q = (q ^ shiftMask) - shiftMask;
            return q;
        }
        else {
            int64_t q = libdivide__mullhi_s64(magic, dividend);
            if (more & LIBDIVIDE_ADD_MARKER) {
                int64_t sign = cast(int8_t)(more >> 7); //must be arithmetic shift and then sign extend
                q += ((dividend ^ sign) - sign);
            }
            q >>= more & LIBDIVIDE_64_SHIFT_MASK;
            q += (q < 0);
            return q;
        }
    }
}

/**
 * Unsigned 64 bit divisor
 *
 * Simply use on right side of division operation
 */
struct U64Divisor {
    ///
    unittest {
        assert (1_000_000_000_000 / U64Divisor(1783) == 560_852_495);
        // Can be used with CTFE
        static assert (1_000_000_000_000 / U64Divisor(1783) == 560_852_495);
    }

    alias Type = typeof(magic);
    uint64_t magic; // magic multiplier
    uint8_t more;   // shift amount plus algorithm flag bits (see the LIBDIVIDE_* enum)

    this(uint64_t d) {
        assert (d > 0, "d==0");
        if ((d & (d - 1)) == 0) {
            this.more = cast(uint8_t)(libdivide__count_trailing_zeros64(d) | LIBDIVIDE_U64_SHIFT_PATH);
            this.magic = 0;
        }
        else {
            const uint32_t floor_log_2_d = 63 - libdivide__count_leading_zeros64(d);

            uint64_t proposed_m, rem;
            uint8_t more;
            proposed_m = libdivide_128_div_64_to_64(1UL << floor_log_2_d, 0, d, &rem); //== (1 << (64 + floor_log_2_d)) / d

            assert(rem > 0 && rem < d);
            const uint64_t e = d - rem;

            /* This power works if e < 2**floor_log_2_d. */
            if (e < (1UL << floor_log_2_d)) {
                /* This power works */
                more = cast(uint8_t)floor_log_2_d;
            }
            else {
                // We have to use the general 65-bit algorithm. We need to compute (2**power) / d. However,
                // we already have (2**(power-1))/d and its remainder. By doubling both, and then correcting
                // the remainder, we can compute the larger division.
                proposed_m += proposed_m; //don't care about overflow here - in fact, we expect it
                const uint64_t twice_rem = rem + rem;
                if (twice_rem >= d || twice_rem < rem) proposed_m += 1;
                more = cast(uint8_t)(floor_log_2_d | LIBDIVIDE_ADD_MARKER);
            }
            this.magic = 1 + proposed_m;
            this.more = more;
            //this.more's shift should in general be ceil_log_2_d. But if we used the smaller power, we subtract
            //one from the shift because we're using the smaller power. If we're using the larger power, we subtract
            //one from the shift because it's taken care of by the add indicator. So floor_log_2_d happens to be
            //correct in both cases, which is why we do it outside of the if statement.
        }
    }

    ref auto opAssign(uint64_t d) {
        this.__ctor(d);
        return this;
    }

    /// Performs `dividend / divisor` using the precomputed magic/shift values.
    uint64_t opBinaryRight(string op: "/")(uint64_t dividend) const pure nothrow @safe @nogc {
        if (more & LIBDIVIDE_U64_SHIFT_PATH) {
            return dividend >> (more & LIBDIVIDE_64_SHIFT_MASK);
        }
        else {
            uint64_t q = libdivide__mullhi_u64(magic, dividend);
            if (more & LIBDIVIDE_ADD_MARKER) {
                uint64_t t = ((dividend - q) >> 1) + q;
                return t >> (more & LIBDIVIDE_64_SHIFT_MASK);
            }
            else {
                return q >> more; //all upper bits are 0 - don't need to mask them off
            }
        }
    }
}

/// Automatically selects the correct divisor based on type
auto divisor(T)(T value) {
    static if (is(T == uint32_t)) {
        return U32Divisor(value);
    }
    else static if (is(T == int32_t)) {
        return S32Divisor(value);
    }
    else static if (is(T == uint64_t)) {
        return U64Divisor(value);
    }
    else static if (is(T == int64_t)) {
        return S64Divisor(value);
    }
    else {
        static assert (false, "T must be an int, uint, long, ulong, not " ~ T.stringof);
    }
}

private:
enum {
    LIBDIVIDE_32_SHIFT_MASK    = 0x1F,
    LIBDIVIDE_64_SHIFT_MASK    = 0x3F,
    LIBDIVIDE_ADD_MARKER       = 0x40,
    LIBDIVIDE_U32_SHIFT_PATH   = 0x80,
    LIBDIVIDE_U64_SHIFT_PATH   = 0x80,
    LIBDIVIDE_S32_SHIFT_PATH   = 0x20,
    LIBDIVIDE_NEGATIVE_DIVISOR = 0x80,
}

static int32_t libdivide__count_trailing_zeros32(uint32_t val) pure nothrow @safe @nogc {
    /* Fast way to count trailing zeros */
    //return __builtin_ctz(val);
    /* Dorky way to count trailing zeros. Note that this hangs for val = 0! */
    int32_t result = 0;
    val = (val ^ (val - 1)) >> 1;  // Set v's trailing 0s to 1s and zero rest
    while (val) {
        val >>= 1;
        result++;
    }
    return result;
}

static int32_t libdivide__count_leading_zeros32(uint32_t val) pure nothrow @safe @nogc {
    /* Fast way to count leading zeros */
    //return __builtin_clz(val);
    /* Dorky way to count leading zeros. Note that this hangs for val = 0! */
    int32_t result = 0;
    while (! (val & (1U << 31))) {
        val <<= 1;
        result++;
    }
    return result;
}

static uint32_t libdivide_64_div_32_to_32(uint32_t u1, uint32_t u0, uint32_t v, uint32_t *r) pure nothrow @safe @nogc {
    //libdivide_64_div_32_to_32: divides a 64 bit uint {u1, u0} by a 32 bit uint {v}. The result must fit in 32 bits.
    //Returns the quotient directly and the remainder in *r
    //#if (LIBDIVIDE_IS_X86_64)
    //    uint32_t result;
    //    __asm__("divl %[v]"
    //            : "=a"(result), "=d"(*r)
    //            : [v] "r"(v), "a"(u0), "d"(u1)
    //            );
    //    return result;
    //}
    //#else
    uint64_t n = ((cast(uint64_t)u1) << 32) | u0;
    uint32_t result = cast(uint32_t)(n / v);
    *r = cast(uint32_t)(n - result * cast(uint64_t)v);
    return result;
    //#endif
}

static uint32_t libdivide__mullhi_u32(uint32_t x, uint32_t y) pure nothrow @safe @nogc {
    uint64_t xl = x, yl = y;
    uint64_t rl = xl * yl;
    return cast(uint32_t)(rl >> 32);
}

static int32_t libdivide__count_trailing_zeros64(uint64_t val) pure nothrow @safe @nogc {
    // Fast way to count trailing zeros. Note that we disable this in 32 bit because gcc does something horrible -
    // it calls through to a dynamically bound function.
    //return __builtin_ctzll(val);
    // Pretty good way to count trailing zeros. Note that this hangs for val = 0
    assert (val != 0);
    uint32_t lo = val & 0xFFFFFFFF;
    if (lo != 0) return libdivide__count_trailing_zeros32(lo);
    return 32 + libdivide__count_trailing_zeros32(cast(uint32_t) (val >> 32));
}

static int64_t libdivide__mullhi_s64(int64_t x, int64_t y) pure nothrow @safe @nogc {
    static if (is(cent)) {
        cent xl = x, yl = y;
        cent rl = xl * yl;
        return cast(cent)(rl >> 64);
    }
    else {
        //full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64)
        const uint32_t mask = 0xFFFFFFFF;
        const uint32_t x0 = cast(uint32_t)(x & mask), y0 = cast(uint32_t)(y & mask);
        const int32_t x1 = cast(int32_t)(x >> 32), y1 = cast(int32_t)(y >> 32);
        const uint32_t x0y0_hi = libdivide__mullhi_u32(x0, y0);
        const int64_t t = x1*cast(int64_t)y0 + x0y0_hi;
        const int64_t w1 = x0*cast(int64_t)y1 + (t & mask);
        return x1*cast(int64_t)y1 + (t >> 32) + (w1 >> 32);
    }
}

static int32_t libdivide__mullhi_s32(int32_t x, int32_t y) pure nothrow @safe @nogc {
    int64_t xl = x, yl = y;
    int64_t rl = xl * yl;
    return cast(int32_t)(rl >> 32); //needs to be arithmetic shift
}

static int32_t libdivide__count_leading_zeros64(uint64_t val) pure nothrow @safe @nogc {
    /* Fast way to count leading zeros */
    //return __builtin_clzll(val);
    /* Dorky way to count leading zeros. Note that this hangs for val = 0! */
    int32_t result = 0;
    while (! (val & (1UL << 63))) {
        val <<= 1;
        result++;
    }
    return result;
}

static uint64_t libdivide_128_div_64_to_64(uint64_t u1, uint64_t u0, uint64_t v, uint64_t *r) pure nothrow @safe @nogc {
    // Divides a 128 bit uint {u1, u0} by a 64 bit uint {v}. The result must fit in 64 bits.
    // Returns the quotient directly and the remainder in *r (if r is non-null).
    const uint64_t b = (1UL << 32);     // Number base (32 bits).
    uint64_t un1, un0,                  // Norm. dividend LSD's.
             vn1, vn0,                  // Norm. divisor digits.
             q1, q0,                    // Quotient digits.
             un64, un21, un10,          // Dividend digit pairs.
             rhat;                      // A remainder.
    int s;                              // Shift amount for norm.

    if (u1 >= v) {                      // If overflow, set rem.
        if (r !is null) {               // to an impossible value,
            *r = cast(uint64_t)(-1);    // and return the largest
        }
        return cast(uint64_t)(-1);      // possible quotient.
    }

    /* count leading zeros */
    s = libdivide__count_leading_zeros64(v); // 0 <= s <= 63.
    if (s > 0) {
        v = v << s;                     // Normalize divisor.
        un64 = (u1 << s) | ((u0 >> (64 - s)) & (-s >> 31));
        un10 = u0 << s;                 // Shift dividend left.
    }
    else {
        // Avoid undefined behavior.
        un64 = u1 | u0;
        un10 = u0;
    }

    vn1 = v >> 32;                      // Break divisor up into
    vn0 = v & 0xFFFFFFFF;               // two 32-bit digits.

    un1 = un10 >> 32;                   // Break right half of
    un0 = un10 & 0xFFFFFFFF;            // dividend into two digits.

    q1 = un64/vn1;                      // Compute the first
    rhat = un64 - q1*vn1;               // quotient digit, q1.
again1:
    if (q1 >= b || q1*vn0 > b*rhat + un1) {
        q1 = q1 - 1;
        rhat = rhat + vn1;
        if (rhat < b) goto again1;
    }

    un21 = un64*b + un1 - q1*v;         // Multiply and subtract.

    q0 = un21/vn1;                      // Compute the second
    rhat = un21 - q0*vn1;               // quotient digit, q0.
again2:
    if (q0 >= b || q0*vn0 > b*rhat + un0) {
        q0 = q0 - 1;
        rhat = rhat + vn1;
        if (rhat < b) goto again2;
    }

    if (r !is null) {                   // If remainder is wanted,
        *r = (un21*b + un0 - q0*v) >> s; // return it.
    }
    return q1*b + q0;
}

static uint64_t libdivide__mullhi_u64(uint64_t x, uint64_t y) pure nothrow @safe @nogc {
    static if (is(ucent)) {
        ucent xl = x, yl = y;
        ucent rl = xl * yl;
        return cast(ucent)(rl >> 64);
    }
    else {
        //full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64)
        const uint32_t mask = 0xFFFFFFFF;
        const uint32_t x0 = cast(uint32_t)(x & mask), x1 = cast(uint32_t)(x >> 32);
        const uint32_t y0 = cast(uint32_t)(y & mask), y1 = cast(uint32_t)(y >> 32);
        const uint32_t x0y0_hi = libdivide__mullhi_u32(x0, y0);
        const uint64_t x0y1 = x0 * cast(uint64_t)y1;
        const uint64_t x1y0 = x1 * cast(uint64_t)y0;
        const uint64_t x1y1 = x1 * cast(uint64_t)y1;

        uint64_t temp = x1y0 + x0y0_hi;
        uint64_t temp_lo = temp & mask, temp_hi = temp >> 32;
        return x1y1 + temp_hi + ((temp_lo + x0y1) >> 32);
    }
}

unittest {
    import std.random;
    import std.string;
    import std.typetuple;

    int counter;
    alias divisors = TypeTuple!(S32Divisor, U32Divisor, S64Divisor, U64Divisor);
    enum tests = 5000;

    foreach(D; divisors) {
        foreach(j; 0 .. tests) {
            D.Type d = uniform(1, D.Type.max);
            D.Type n = uniform(D.Type.min, D.Type.max);
            D.Type q = n / d;

            D d2 = D(d);
            D.Type q2 = n / d2;
            assert (q == q2, "%s/%s = %s, not %s".format(n, d, q, q2));
            counter++;
        }
    }
    assert(counter == divisors.length * tests, "counter=%s".format(counter));
}