cpprisc16  June 16, 2020
cppextendedrisc16.cpp
1 /* -*- coding: latin-1 -*- */
2 
3 /** \file cpprisc16/cppextendedrisc16.cpp (March 12, 2017)
4  * \brief Extended instruction set.
5  *
6  * Piece of cpprisc16.
7  * https://bitbucket.org/OPiMedia/cpprisc16
8  *
9  * GPLv3 --- Copyright (C) 2017 Olivier Pirson
10  * http://www.opimedia.be/
11  */
12 
13 #include "cppextendedrisc16.hpp"
14 
15 #include "assert.hpp"
16 
17 
18 namespace cpprisc16 {
19  void
20  x_add32(unsigned int a2, unsigned int a1, unsigned int b2, unsigned int b1,
21  unsigned int tmp1, unsigned int tmp2, unsigned int tmp3) {
22  assert(0 < a1);
23  assert(a1 < nb_registers);
24 
25  assert(0 < a2);
26  assert(a2 < nb_registers);
27 
28  assert(0 < b1);
29  assert(b1 < nb_registers);
30 
31  assert(0 < b2);
32  assert(b2 < nb_registers);
33 
34  assert(0 < tmp1);
35  assert(tmp1 < nb_registers);
36 
37  assert(0 < tmp2);
38  assert(tmp2 < nb_registers);
39 
40  assert(0 < tmp3);
41  assert(tmp3 < nb_registers);
42 
43  ASSERT_7_DIFFERENT(a2, a1, b2, b1, tmp1, tmp2, tmp3);
44 
45  // b1:a1 <-- a1 + b1
46  x_addc(a1, b1, tmp1, tmp2, tmp3);
47 
48  // a2 <-- a2 + b2 + carry
49  i_add(a2, a2, b2);
50  i_add(a2, a2, b1);
51  }
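 // Example with hypothetical register contents: R[a2]:R[a1] == 0x0001:0xFFFF and
 // R[b2]:R[b1] == 0x0000:0x0002 give R[a2]:R[a1] == 0x0002:0x0001
 // (0x0001FFFF + 0x00000002 == 0x00020001); R[b1] is clobbered with the carry.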
52 
53 
54  void
55  x_addc(unsigned int a, unsigned int b,
56  unsigned int tmp1, unsigned int tmp2, unsigned int tmp3) {
57  assert(0 < a);
58  assert(a < nb_registers);
59 
60  assert(0 < b);
61  assert(b < nb_registers);
62 
63  assert(0 < tmp1);
64  assert(tmp1 < nb_registers);
65 
66  assert(0 < tmp2);
67  assert(tmp2 < nb_registers);
68 
69  assert(0 < tmp3);
70  assert(tmp3 < nb_registers);
71 
72  ASSERT_5_DIFFERENT(a, b, tmp1, tmp2, tmp3);
73 
74  x_set0x8000(tmp3); // tmp3 <-- mask 0b10...0
75 
76  i_nand(tmp1, a, tmp3); // tmp1 <-- 0b11...1 or 0b01...1 if (MSB of first operand) == 1
77  i_nand(tmp2, b, tmp3); // tmp2 <-- 0b11...1 or 0b01...1 if (MSB of second operand) == 1
78 
79  // Set sum on 16 bits
80  i_add(a, a, b); // a <-- (first operand) + (second operand)
81 
82  i_nand(b, a, tmp3); // b <-- 0b11...1 or 0b01...1 if (MSB of sum) == 1
83 
84  i_beq(tmp1, tmp2, addc_same_tmp1_tmp2);
85  i_nand(tmp3, b, tmp3); // tmp3 <-- 0b11...1 or 0b01...1 if carry on *15* bits sum
86  addc_same_tmp1_tmp2:
87 
88  // Set carry (if no pair of the MSBs is both 1, i.e. no carry, then jump to end)
89  i_nand(b, tmp1, tmp2); // 0b10...0 or 0 if MSBs of tmp1 and tmp2 are 1
90  i_beq(b, 0, addc_end);
91  i_nand(b, tmp1, tmp3); // 0b10...0 or 0 if MSBs of tmp1 and tmp3 are 1
92  i_beq(b, 0, addc_end);
93  i_nand(b, tmp2, tmp3); // 0b10...0 or 0 if MSBs of tmp2 and tmp3 are 1
94  i_beq(b, 0, addc_end);
95 
96  i_addi(b, 0, 0x1); // b <-- carry
97 
98  addc_end:
99  return;
100  }
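 // Example with hypothetical register contents: R[a] == 0xFFFF and R[b] == 0x0001
 // give R[a] == 0x0000 and R[b] == 0x0001 (carry out of bit 15);
 // R[a] == 0x7FFF and R[b] == 0x0001 give R[a] == 0x8000 and R[b] == 0 (no carry).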
101 
102 
103  void
104  x_and_to(unsigned int result, unsigned int a, unsigned int b) {
105  assert(0 < result);
106  assert(result < nb_registers);
107 
108  assert(0 < a);
109  assert(a < nb_registers);
110 
111  assert(0 < b);
112  assert(b < nb_registers);
113 
114  i_nand(result, a, b);
115  x_not_to(result, result);
116  }
117 
118 
119  void
120  x_inc32(unsigned int a2, unsigned int a1) {
121  assert(0 < a2);
122  assert(a2 < nb_registers);
123 
124  assert(0 < a1);
125  assert(a1 < nb_registers);
126 
127  assert(a2 != a1);
128 
129  i_addi(a1, a1, 0x1); // a1 <-- a1 + 1
130  i_beq(a1, 0, inc32_carry);
131  x_branch(inc32_end);
132  inc32_carry:
133  i_addi(a2, a2, 0x1); // a2 <-- a2 + carry
134  inc32_end:
135  return;
136  }
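 // Example: R[a2]:R[a1] == 0x0000:0xFFFF becomes 0x0001:0x0000;
 // for any other value of R[a1], only R[a1] is incremented and R[a2] is unchanged.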
137 
138 
139  void
140  x_is_lower_to(unsigned int result, unsigned int a, unsigned int b,
141  unsigned int tmp) {
142  assert(0 < result);
143  assert(result < nb_registers);
144 
145  assert(0 < a);
146  assert(a < nb_registers);
147 
148  assert(0 < b);
149  assert(b < nb_registers);
150 
151  assert(0 < tmp);
152  assert(tmp < nb_registers);
153 
154  ASSERT_4_DIFFERENT(result, a, b, tmp);
155 
156  x_mask0x8000_to(result, a);
157  i_beq(result, 0, a_msb_0);
158 
159  x_mask0x8000_to(result, b);
160  i_beq(result, 0, a_msb_1_b_msb_0);
161  // a MSB 1, b MSB 1
162  x_branch(must_check);
163 
164  a_msb_1_b_msb_0: // a > b
165  x_set0(result);
166  x_branch(end);
167 
168  a_msb_0:
169  x_mask0x8000_to(result, b);
170  i_beq(result, 0, must_check); // if a MSB 0, b MSB 0
171  // a MSB 0, b MSB 1: a < b
172  x_mov(result, 0x1);
173  x_branch(end);
174 
175  must_check:
176  // MSB of a and b are equal
177  x_sub_to(result, a, b);
178  x_mask0x8000(result, tmp);
179  end:
180  return;
181  }
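 // Example with hypothetical register contents: R[a] == 0x8000, R[b] == 0x0001:
 // the MSBs differ and the MSB of a is 1, so R[result] == 0 (a is not lower).
 // R[a] == 0x0003, R[b] == 0x0005: same MSB, so R[result] == (0x0003 - 0x0005) & 0x8000
 // == 0x8000, a non-zero value (a < b).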
182 
183 
184  void
185  x_lshift(unsigned int a) {
186  assert(0 < a);
187  assert(a < nb_registers);
188 
189  i_add(a, a, a);
190  }
191 
192 
193  void
194  x_lshift_to(unsigned int result, unsigned int a) {
195  assert(0 < result);
196  assert(result < nb_registers);
197 
198  assert(0 < a);
199  assert(a < nb_registers);
200 
201  i_add(result, a, a);
202  }
203 
204 
205  void
206  x_lshift32(unsigned int a2, unsigned int a1,
207  unsigned int tmp) {
208  assert(0 < a2);
209  assert(a2 < nb_registers);
210 
211  assert(0 < a1);
212  assert(a1 < nb_registers);
213 
214  assert(0 < tmp);
215  assert(tmp < nb_registers);
216 
217  ASSERT_3_DIFFERENT(a2, a1, tmp);
218 
219  // tmp <-- 0 or 0b10...0 if MSB of a1 == 0 or 1
220  x_set0x8000(tmp);
221  x_and_to(tmp, a1, tmp);
222 
223  x_lshift(a1);
224  x_lshift(a2);
225 
226  // a2 <-- a2 or (a2 + 1) (carry into the LSB of a2) if MSB of original a1 == 0 or 1
227  i_beq(tmp, 0, lshift32_end);
228  i_addi(a2, a2, 0x1);
229  lshift32_end:
230  return;
231  }
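 // Example: R[a2]:R[a1] == 0x0000:0x8001 becomes 0x0001:0x0002
 // (0x00008001 * 2 == 0x00010002); the MSB of the original R[a1] moves into the LSB of R[a2].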
232 
233 
234  void
235  x_lshift_8(unsigned int a) {
236  assert(0 < a);
237  assert(a < nb_registers);
238 
239  x_lshift(a);
240  x_lshift(a);
241 
242  x_lshift(a);
243  x_lshift(a);
244 
245  x_lshift(a);
246  x_lshift(a);
247 
248  x_lshift(a);
249  x_lshift(a);
250  }
251 
252 
253  void
254  x_lshift_8_to(unsigned int result, unsigned int a) {
255  assert(0 < result);
256  assert(result < nb_registers);
257 
258  assert(0 < a);
259  assert(a < nb_registers);
260 
261  x_lshift_to(result, a);
262  x_lshift(result);
263 
264  x_lshift(result);
265  x_lshift(result);
266 
267  x_lshift(result);
268  x_lshift(result);
269 
270  x_lshift(result);
271  x_lshift(result);
272  }
273 
274 
275  void
276  x_lshift32_8(unsigned int a2, unsigned int a1,
277  unsigned int tmp1, unsigned int tmp2, unsigned int tmp3) {
278  assert(0 < a2);
279  assert(a2 < nb_registers);
280 
281  assert(0 < a1);
282  assert(a1 < nb_registers);
283 
284  assert(0 < tmp1);
285  assert(tmp1 < nb_registers);
286 
287  assert(0 < tmp2);
288  assert(tmp2 < nb_registers);
289 
290  assert(0 < tmp3);
291  assert(tmp3 < nb_registers);
292 
293  ASSERT_5_DIFFERENT(a2, a1, tmp1, tmp2, tmp3);
294 
295  x_lshift_8(a2);
296 
297  x_rshift_8_to(tmp1, a1, tmp2, tmp3);
298  i_add(a2, a2, tmp1);
299 
300  x_lshift_8(a1);
301  }
302 
303 
304  void
305  x_mask0x8000(unsigned int a,
306  unsigned int tmp) {
307  assert(0 < a);
308  assert(a < nb_registers);
309 
310  assert(0 < tmp);
311  assert(tmp < nb_registers);
312 
313  assert(a != tmp);
314 
315  x_set0x8000(tmp);
316  x_and_to(a, a, tmp);
317  }
318 
319 
320  void
321  x_mask0x8000_to(unsigned int result, unsigned int a) {
322  assert(0 < result);
323  assert(result < nb_registers);
324 
325  assert(0 < a);
326  assert(a < nb_registers);
327 
328  assert(result != a);
329 
330  x_set0x8000(result);
331  x_and_to(result, a, result);
332  }
333 
334 
335  void
336  x_mov(unsigned int result, unsigned int a) {
337  assert(0 < result);
338  assert(result < nb_registers);
339 
340  assert(0 < a);
341  assert(a < nb_registers);
342 
343  i_add(result, a, 0);
344  }
345 
346 
347  void
348  x_mul(unsigned int a, unsigned int b,
349  unsigned int tmp1, unsigned int tmp2, unsigned int tmp3,
350  unsigned int tmp4, unsigned int tmp5) {
351  assert(0 < a);
352  assert(a < nb_registers);
353 
354  assert(0 < b);
355  assert(b < nb_registers);
356 
357  assert(0 < tmp1);
358  assert(tmp1 < nb_registers);
359 
360  assert(0 < tmp2);
361  assert(tmp2 < nb_registers);
362 
363  assert(0 < tmp3);
364  assert(tmp3 < nb_registers);
365 
366  assert(0 < tmp4);
367  assert(tmp4 < nb_registers);
368 
369  assert(0 < tmp5);
370  assert(tmp5 < nb_registers);
371 
372  ASSERT_7_DIFFERENT(a, b, tmp1, tmp2, tmp3, tmp4, tmp5);
373 
374  // tmp2:tmp1 will contain the product
375  x_set0(tmp1);
376  x_set0(tmp2);
377 
378 
379  // If first or second operand == 0 then jump to mul_end
380  i_beq(a, 0, mul_end);
381  i_beq(b, 0, mul_end);
382 
383 
384  // Memory 0: first operand (least significant bits)
385  // 1: first operand (most significant bits)
386  // 2: second operand
387  // 3: mask
388 
389 
390  // tmp3 <-- second operand (b), and save
391  x_mov(tmp3, b);
392  i_sw(tmp3, 0, 0x2); // save second operand
393 
394 
395  // b:a will contain first operand on 32 bits, shifted on each step
396  x_set0(b);
397 
398 
399  // tmp5 <-- mask = 1
400  i_addi(tmp5, 0, 0x1);
401 
402 
403  // For each bit of the second operand,
404  // if the current bit is 1
405  // then add the shifted first operand to the partial product.
406  mul_loop:
407  // INV: b:a == shifted first operand
408  // tmp2:tmp1 == partial product (sum of some shifted first operand)
409  // tmp3 == second operand
410  // tmp5 == mask
411 
412  // (Bit of second operand) == 0 or 1 ?
413  x_and_to(tmp4, tmp3, tmp5);
414  i_beq(tmp4, 0, mul_endif_bit);
415 
416  // If (bit of second operand) == 1
417  i_sw(a, 0, 0x0); // save first operand
418  i_sw(b, 0, 0x1);
419  i_sw(tmp5, 0, 0x3); // save mask
420 
421  // tmp2:tmp1 <-- tmp2:tmp1 + b:a
422  x_add32(tmp2, tmp1, b, a, tmp4, tmp3, tmp5);
423 
424  i_lw(tmp3, 0, 0x2); // reload second operand
425  i_lw(a, 0, 0x0); // reload first operand
426  i_lw(b, 0, 0x1);
427  i_lw(tmp5, 0, 0x3); // reload mask
428 
429  mul_endif_bit:
430 
431  // mask <-- mask << 1
432  x_lshift(tmp5);
433  i_beq(tmp5, 0, mul_end);
434 
435  // first operand <-- (first operand) << 1
436  x_lshift32(b, a, tmp4);
437 
438  // Jump to mul_loop
439  i_beq(0, 0, mul_loop);
440 
441  mul_end:
442  x_mov(a, tmp1);
443  x_mov(b, tmp2);
444  }
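 // Example with hypothetical register contents: R[a] == 0x0123 and R[b] == 0x0010
 // give R[b]:R[a] == 0x0000:0x1230 (the product fits in 16 bits);
 // Memory[0..3] are used as scratch and the five temporary registers are clobbered.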
445 
446 
447  void
448  x_mul_karatsuba(unsigned int a, unsigned int b,
449  unsigned int tmp1, unsigned int tmp2, unsigned int tmp3,
450  unsigned int tmp4, unsigned int tmp5) {
451  assert(0 < a);
452  assert(a < nb_registers);
453 
454  assert(0 < b);
455  assert(b < nb_registers);
456 
457  assert(0 < tmp1);
458  assert(tmp1 < nb_registers);
459 
460  assert(0 < tmp2);
461  assert(tmp2 < nb_registers);
462 
463  assert(0 < tmp3);
464  assert(tmp3 < nb_registers);
465 
466  assert(0 < tmp4);
467  assert(tmp4 < nb_registers);
468 
469  assert(0 < tmp5);
470  assert(tmp5 < nb_registers);
471 
472  ASSERT_7_DIFFERENT(a, b, tmp1, tmp2, tmp3, tmp4, tmp5);
473 
474  // Split a and b into 8-bit parts a2:a1 and b2:b1 and compute:
475  // a2:a1 * b2:b1 == (a2*b2 << 16) + ((a1*b2 + a2*b1) << 8) + a1*b1
476  //
477  // Compute a1*b2 + a2*b1 with the Karatsuba formula:
478  // https://en.wikipedia.org/wiki/Karatsuba_algorithm
479  // a1*b2 + a2*b1 == a1*b1 + a2*b2 - (a1 - a2)*(b1 - b2)
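 // For instance, with a1 == 3, a2 == 5, b1 == 7, b2 == 2 (values chosen only to
 // illustrate the identity): a1*b2 + a2*b1 == 6 + 35 == 41
 // and a1*b1 + a2*b2 - (a1 - a2)*(b1 - b2) == 21 + 10 - (-2)*5 == 41.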
480 
481  // Memory 0: a1
482  // 1: a2
483  // 2: b1
484  // 3: b2
485  // 4: sign of a1 - a2 (0 if >= 0, 0x8000 else)
486  // 5: sign of b1 - b2 (0 if >= 0, 0x8000 else)
487 
488  // tmp3 <-- a2 and save
489  // tmp4 <-- b2 and save
490  x_rshift_8_duo_to(tmp3, a, tmp4, b, tmp1, tmp2, tmp5);
491  i_sw(tmp3, 0, 0x1);
492  i_sw(tmp4, 0, 0x3);
493 
494  // tmp1 <-- a1 and save
495  // tmp2 <-- b1 and save
496  p_movi(tmp5, 0xFF);
497  x_and_to(tmp1, a, tmp5);
498  x_and_to(tmp2, b, tmp5);
499  i_sw(tmp1, 0, 0x0);
500  i_sw(tmp2, 0, 0x2);
501 
502  // a <-- a1*b1
503  x_mul8_to(a, tmp1, tmp2, tmp4, tmp5);
504 
505  // b <-- a2*b2
506  i_lw(tmp4, 0, 0x3); // reload b2
507  x_mul8_to(b, tmp3, tmp4, tmp1, tmp5);
508 
509  // tmp2:tmp1 <-- a1*b1 + a2*b2
510  x_mov(tmp1, a);
511  x_mov(tmp2, b);
512  x_addc(tmp1, tmp2, tmp3, tmp4, tmp5);
513 
514  // b:a <-- (a2 * b2):(a1 * b1) + (a1*b1 + a2*b2) << 8
515  i_beq(tmp2, 0, karatsuba_not_carry);
516  i_lui(tmp5, 0x4); // tmp5 <-- (1 << 8)
517  i_add(b, b, tmp5);
518  karatsuba_not_carry:
519 
520  x_rshift_8_to(tmp2, tmp1, tmp4, tmp5);
521  x_lshift_8(tmp1);
522 
523  x_add32(b, a, tmp2, tmp1, tmp3, tmp4, tmp5);
524 
525  // tmp2 <-- |a1 - a2| (result on size at most 8 bits)
526  i_lw(tmp1, 0, 0x0); // reload a1
527  i_lw(tmp2, 0, 0x1); // reload a2
528  x_sub_from(tmp2, tmp1); // tmp2 <-- a1 - a2
529  x_mask0x8000_to(tmp1, tmp2);
530  i_sw(tmp1, 0, 0x4); // save sign
531  i_beq(tmp1, 0, karatsuba_diffa_pos);
532  x_neg(tmp2); // tmp2 <-- a2 - a1
533  karatsuba_diffa_pos:
534 
535  // tmp4 <-- |b1 - b2| (result on size at most 8 bits)
536  i_lw(tmp3, 0, 0x2); // reload b1
537  i_lw(tmp4, 0, 0x3); // reload b2
538  x_sub_from(tmp4, tmp3); // tmp4 <-- b1 - b2
539  x_mask0x8000_to(tmp3, tmp4);
540  i_sw(tmp3, 0, 0x5); // save sign
541  i_beq(tmp3, 0, karatsuba_diffb_pos);
542  x_neg(tmp4); // tmp4 <-- b2 - b1
543  karatsuba_diffb_pos:
544 
545  // tmp1 <-- |a1 - a2| * |b1 - b2|
546  x_mul8_to(tmp1, tmp2, tmp4, tmp3, tmp5);
547 
548  // tmp2:tmp1 <-- (|a1 - a2| * |b1 - b2|) << 8
549  x_rshift_8_to(tmp2, tmp1, tmp4, tmp5);
550  x_lshift_8(tmp1);
551 
552  // Load and compare sign of differences (a1 - a2) and (b1 - b2)
553  i_lw(tmp4, 0, 0x4);
554  i_lw(tmp5, 0, 0x5);
555  i_beq(tmp4, tmp5, karatsuba_diff_same_sign);
556  x_branch(karatsuba_end);
557  karatsuba_diff_same_sign:
558  // tmp2:tmp1 <-- - tmp2:tmp1
559  x_not(tmp2);
560  x_not(tmp1);
561  x_inc32(tmp2, tmp1);
562 
563  karatsuba_end:
564  // b:a <-- (a2 * b2):(a1 * b1) + (a1*b1 + a2*b2 - (a1 - a2)*(b1 - b2)) << 8
565  x_add32(b, a, tmp2, tmp1, tmp3, tmp4, tmp5);
566 
567  return;
568  }
569 
570 
571  void
572  x_mul8_to(unsigned int result, unsigned int a, unsigned int b,
573  unsigned int tmp1, unsigned int tmp2) {
574  assert(0 < result);
575  assert(result < nb_registers);
576 
577  assert(0 < a);
578  assert(a < nb_registers);
579  assert(registers[a] <= 0xFF);
580 
581  assert(0 < b);
582  assert(b < nb_registers);
583  assert(registers[b] <= 0xFF);
584 
585  assert(0 < tmp1);
586  assert(tmp1 < nb_registers);
587 
588  assert(0 < tmp2);
589  assert(tmp2 < nb_registers);
590 
591  ASSERT_5_DIFFERENT(result, a, b, tmp1, tmp2);
592 
593  x_set0(result);
594  i_addi(tmp2, 0, 0x1); // mask <-- 1
595 
596  mul8_loop:
597  x_and_to(tmp1, b, tmp2);
598  i_beq(tmp1, 0, mul8_not1); // if (current bit of b) == 1
599  i_add(result, result, a);
600  mul8_not1:
601 
602  x_lshift(a); // a <-- a << 1
603  x_lshift(tmp2); // mask <-- mask << 1
604 
605  // If mask <= 0x80 (== 0b10000000 == 128) then loop
606  i_lui(tmp1, 0x4); // tmp1 <-- 0x100 == 0b100000000 == 256
607  i_beq(tmp2, tmp1, mul8_end);
608  x_branch(mul8_loop);
609  mul8_end:
610  return;
611  }
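 // Example with hypothetical register contents: R[a] == 0x05 and R[b] == 0x03:
 // bits 0 and 1 of b are set, so R[result] == 0x05 + 0x0A == 0x0F;
 // R[a] ends up shifted to 0x0500 and both temporaries are clobbered.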
612 
613 
614  void
615  x_neg(unsigned int a) {
616  assert(0 < a);
617  assert(a < nb_registers);
618 
619  x_not(a);
620  i_addi(a, a, 0x1);
621  }
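 // Example: R[a] == 0x0003 becomes 0xFFFD (== -3 modulo 2^16): ~0x0003 == 0xFFFC, plus 1.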
622 
623 
624  void
625  x_not(unsigned int a) {
626  assert(0 < a);
627  assert(a < nb_registers);
628 
629  i_nand(a, a, a);
630  }
631 
632 
633  void
634  x_not_to(unsigned int result, unsigned int a) {
635  assert(0 < result);
636  assert(result < nb_registers);
637 
638  assert(0 < a);
639  assert(a < nb_registers);
640 
641  i_nand(result, a, a);
642  }
643 
644 
645  void
646  x_or_to(unsigned int result, unsigned int a, unsigned int b) {
 assert(0 < result);
647  assert(result < nb_registers);
648 
649  assert(0 < a);
650  assert(a < nb_registers);
651 
652  assert(0 < b);
653  assert(b < nb_registers);
654 
655  assert(a != b);
656 
657  x_not_to(a, a);
658  x_not_to(b, b);
659  i_nand(result, a, b);
660  }
661 
662 
663  void
664  x_rshift_to(unsigned int result, unsigned int a,
665  unsigned int tmp1, unsigned int tmp2) {
666  assert(0 < result);
667  assert(result < nb_registers);
668 
669  assert(0 < a);
670  assert(a < nb_registers);
671 
672  assert(0 < tmp1);
673  assert(tmp1 < nb_registers);
674 
675  assert(0 < tmp2);
676  assert(tmp2 < nb_registers);
677 
678  ASSERT_4_DIFFERENT(result, a, tmp1, tmp2);
679 
680  x_set0(result);
681  i_addi(tmp1, 0, 0x1); // target mask
682 
683  rshift_loop:
684  x_lshift_to(tmp2, tmp1);
685 
686  // Test source bit
687  x_and_to(tmp2, a, tmp2);
688  i_beq(tmp2, 0, rshift_bit0);
689 
690  // If source bit was 1 then set target bit to 1
691  i_add(result, result, tmp1);
692  rshift_bit0:
693 
694  x_lshift(tmp1);
695 
696  i_beq(tmp1, 0, rshift_end);
697  x_branch(rshift_loop);
698 
699  rshift_end:
700  return;
701  }
702 
703 
704  void
705  x_rshift_8_to(unsigned int result, unsigned int a,
706  unsigned int tmp1, unsigned int tmp2) {
707  assert(0 < result);
708  assert(result < nb_registers);
709 
710  assert(0 < a);
711  assert(a < nb_registers);
712 
713  assert(0 < tmp1);
714  assert(tmp1 < nb_registers);
715 
716  assert(0 < tmp2);
717  assert(tmp2 < nb_registers);
718 
719  ASSERT_4_DIFFERENT(result, a, tmp1, tmp2);
720 
721  x_set0(result);
722 
723 
724  // 5th figure in base 4
725  i_lui(tmp1, 0xC); // source mask = 0x300
726 
727  x_and_to(tmp1, a, tmp1); // tmp1 <-- source & mask
728  i_beq(tmp1, 0, rshift_8_5th_end);
729 
730  i_lui(tmp2, 0x4); // 0x100
731  i_beq(tmp1, tmp2, rshift_8_5th_masked_1);
732 
733  i_lui(tmp2, 0x8); // 0x200
734  i_beq(tmp1, tmp2, rshift_8_5th_masked_2);
735 
736  i_addi(result, result, 0x3); // if masked 3
737  x_branch(rshift_8_5th_end);
738 
739  rshift_8_5th_masked_2:
740  i_addi(result, result, 0x2);
741  x_branch(rshift_8_5th_end);
742 
743  rshift_8_5th_masked_1:
744  i_addi(result, result, 0x1);
745 
746  rshift_8_5th_end:
747 
748 
749  // 6th figure in base 4
750  i_lui(tmp1, 0x30); // source mask = 0xC00
751 
752  x_and_to(tmp1, a, tmp1); // tmp1 <-- source & mask
753  i_beq(tmp1, 0, rshift_8_6th_end);
754 
755  i_lui(tmp2, 0x10); // 0x400
756  i_beq(tmp1, tmp2, rshift_8_6th_masked_1);
757 
758  i_lui(tmp2, 0x20); // 0x800
759  i_beq(tmp1, tmp2, rshift_8_6th_masked_2);
760 
761  i_addi(result, result, 0xC); // if masked 3
762  x_branch(rshift_8_6th_end);
763 
764  rshift_8_6th_masked_2:
765  i_addi(result, result, 0x8);
766  x_branch(rshift_8_6th_end);
767 
768  rshift_8_6th_masked_1:
769  i_addi(result, result, 0x4);
770 
771  rshift_8_6th_end:
772 
773 
774  // 7th figure in base 4
775  i_lui(tmp1, 0xC0); // source mask = 0x3000
776 
777  x_and_to(tmp1, a, tmp1); // tmp1 <-- source & mask
778  i_beq(tmp1, 0, rshift_8_7th_end);
779 
780  i_lui(tmp2, 0x40); // 0x1000
781  i_beq(tmp1, tmp2, rshift_8_7th_masked_1);
782 
783  i_lui(tmp2, 0x80); // 0x2000
784  i_beq(tmp1, tmp2, rshift_8_7th_masked_2);
785 
786  i_addi(result, result, 0x30); // if masked 3
787  x_branch(rshift_8_7th_end);
788 
789  rshift_8_7th_masked_2:
790  i_addi(result, result, 0x20);
791  x_branch(rshift_8_7th_end);
792 
793  rshift_8_7th_masked_1:
794  i_addi(result, result, 0x10);
795 
796  rshift_8_7th_end:
797 
798 
799  // 8th figure in base 4
800  i_lui(tmp1, 0x300); // source mask = 0xC000
801 
802  x_and_to(tmp1, a, tmp1); // tmp1 <-- source & mask
803  i_beq(tmp1, 0, rshift_8_8th_end);
804 
805  i_lui(tmp2, 0x100); // 0x4000
806  i_beq(tmp1, tmp2, rshift_8_8th_masked_1);
807 
808  i_lui(tmp2, 0x200); // 0x8000
809  i_beq(tmp1, tmp2, rshift_8_8th_masked_2);
810 
811  i_lui(tmp1, 0x3); // 0xC0
812  i_add(result, result, tmp1); // if masked 3
813  x_branch(rshift_8_8th_end);
814 
815  rshift_8_8th_masked_2:
816  i_lui(tmp1, 0x2); // 0x80
817  i_add(result, result, tmp1);
818  x_branch(rshift_8_8th_end);
819 
820  rshift_8_8th_masked_1:
821  i_lui(tmp1, 0x1); // 0x40
822  i_add(result, result, tmp1);
823 
824  rshift_8_8th_end:
825  return;
826  }
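 // Example: R[a] == 0xAB12 gives R[result] == 0x00AB: each base-4 digit of the high
 // byte (masks 0x300, 0xC00, 0x3000, 0xC000) is tested and added to the result
 // already shifted right by 8 positions (0x3 + 0x8 + 0x20 + 0x80 == 0xAB).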
827 
828 
829  void
830  x_rshift_8_duo_to(unsigned int result_a, unsigned int a,
831  unsigned int result_b, unsigned int b,
832  unsigned int tmp1, unsigned int tmp2, unsigned int tmp3) {
833  assert(0 < result_a);
834  assert(result_a < nb_registers);
835 
836  assert(0 < a);
837  assert(a < nb_registers);
838 
839  assert(0 < result_b);
840  assert(result_b < nb_registers);
841 
842  assert(0 < b);
843  assert(b < nb_registers);
844 
845  assert(0 < tmp1);
846  assert(tmp1 < nb_registers);
847 
848  assert(0 < tmp2);
849  assert(tmp2 < nb_registers);
850 
851  assert(0 < tmp3);
852  assert(tmp3 < nb_registers);
853 
854  ASSERT_7_DIFFERENT(result_a, a, result_b, b, tmp1, tmp2, tmp3);
855 
856  x_set0(result_a);
857  x_set0(result_b);
858 
859 
860  i_lui(tmp3, 0xC); // source mask = 0x300
861 
862  // 5th figure of a in base 4
863  x_and_to(tmp1, a, tmp3); // tmp1 <-- source & mask
864  i_beq(tmp1, 0, rshift_8_a_5th_end);
865 
866  i_lui(tmp2, 0x4); // 0x100
867  i_beq(tmp1, tmp2, rshift_8_a_5th_masked_1);
868 
869  i_lui(tmp2, 0x8); // 0x200
870  i_beq(tmp1, tmp2, rshift_8_a_5th_masked_2);
871 
872  i_addi(result_a, result_a, 0x3); // if masked 3
873  x_branch(rshift_8_a_5th_end);
874 
875  rshift_8_a_5th_masked_2:
876  i_addi(result_a, result_a, 0x2);
877  x_branch(rshift_8_a_5th_end);
878 
879  rshift_8_a_5th_masked_1:
880  i_addi(result_a, result_a, 0x1);
881 
882  rshift_8_a_5th_end:
883 
884 
885  // 5th figure of b in base 4
886  x_and_to(tmp1, b, tmp3); // tmp1 <-- source & mask
887  i_beq(tmp1, 0, rshift_8_b_5th_end);
888 
889  i_lui(tmp2, 0x4); // 0x100
890  i_beq(tmp1, tmp2, rshift_8_b_5th_masked_1);
891 
892  i_lui(tmp2, 0x8); // 0x200
893  i_beq(tmp1, tmp2, rshift_8_b_5th_masked_2);
894 
895  i_addi(result_b, result_b, 0x3); // if masked 3
896  x_branch(rshift_8_b_5th_end);
897 
898  rshift_8_b_5th_masked_2:
899  i_addi(result_b, result_b, 0x2);
900  x_branch(rshift_8_b_5th_end);
901 
902  rshift_8_b_5th_masked_1:
903  i_addi(result_b, result_b, 0x1);
904 
905  rshift_8_b_5th_end:
906 
907 
908  i_lui(tmp3, 0x30); // source mask = 0xC00
909 
910  // 6th figure of a in base 4
911  x_and_to(tmp1, a, tmp3); // tmp1 <-- source & mask
912  i_beq(tmp1, 0, rshift_8_a_6th_end);
913 
914  i_lui(tmp2, 0x10); // 0x400
915  i_beq(tmp1, tmp2, rshift_8_a_6th_masked_1);
916 
917  i_lui(tmp2, 0x20); // 0x800
918  i_beq(tmp1, tmp2, rshift_8_a_6th_masked_2);
919 
920  i_addi(result_a, result_a, 0xC); // if masked 3
921  x_branch(rshift_8_a_6th_end);
922 
923  rshift_8_a_6th_masked_2:
924  i_addi(result_a, result_a, 0x8);
925  x_branch(rshift_8_a_6th_end);
926 
927  rshift_8_a_6th_masked_1:
928  i_addi(result_a, result_a, 0x4);
929 
930  rshift_8_a_6th_end:
931 
932 
933  // 6th figure of b in base 4
934  x_and_to(tmp1, b, tmp3); // tmp1 <-- source & mask
935  i_beq(tmp1, 0, rshift_8_b_6th_end);
936 
937  i_lui(tmp2, 0x10); // 0x400
938  i_beq(tmp1, tmp2, rshift_8_b_6th_masked_1);
939 
940  i_lui(tmp2, 0x20); // 0x800
941  i_beq(tmp1, tmp2, rshift_8_b_6th_masked_2);
942 
943  i_addi(result_b, result_b, 0xC); // if masked 3
944  x_branch(rshift_8_b_6th_end);
945 
946  rshift_8_b_6th_masked_2:
947  i_addi(result_b, result_b, 0x8);
948  x_branch(rshift_8_b_6th_end);
949 
950  rshift_8_b_6th_masked_1:
951  i_addi(result_b, result_b, 0x4);
952 
953  rshift_8_b_6th_end:
954 
955 
956  i_lui(tmp3, 0xC0); // source mask = 0x3000
957 
958  // 7th figure of a in base 4
959  x_and_to(tmp1, a, tmp3); // tmp1 <-- source & mask
960  i_beq(tmp1, 0, rshift_8_a_7th_end);
961 
962  i_lui(tmp2, 0x40); // 0x1000
963  i_beq(tmp1, tmp2, rshift_8_a_7th_masked_1);
964 
965  i_lui(tmp2, 0x80); // 0x2000
966  i_beq(tmp1, tmp2, rshift_8_a_7th_masked_2);
967 
968  i_addi(result_a, result_a, 0x30); // if masked 3
969  x_branch(rshift_8_a_7th_end);
970 
971  rshift_8_a_7th_masked_2:
972  i_addi(result_a, result_a, 0x20);
973  x_branch(rshift_8_a_7th_end);
974 
975  rshift_8_a_7th_masked_1:
976  i_addi(result_a, result_a, 0x10);
977 
978  rshift_8_a_7th_end:
979 
980 
981  // 7th figure of b in base 4
982  x_and_to(tmp1, b, tmp3); // tmp1 <-- source & mask
983  i_beq(tmp1, 0, rshift_8_b_7th_end);
984 
985  i_lui(tmp2, 0x40); // 0x1000
986  i_beq(tmp1, tmp2, rshift_8_b_7th_masked_1);
987 
988  i_lui(tmp2, 0x80); // 0x2000
989  i_beq(tmp1, tmp2, rshift_8_b_7th_masked_2);
990 
991  i_addi(result_b, result_b, 0x30); // if masked 3
992  x_branch(rshift_8_b_7th_end);
993 
994  rshift_8_b_7th_masked_2:
995  i_addi(result_b, result_b, 0x20);
996  x_branch(rshift_8_b_7th_end);
997 
998  rshift_8_b_7th_masked_1:
999  i_addi(result_b, result_b, 0x10);
1000 
1001  rshift_8_b_7th_end:
1002 
1003 
1004  i_lui(tmp3, 0x300); // source mask = 0xC000
1005 
1006  // 8th figure of a in base 4
1007  x_and_to(tmp1, a, tmp3); // tmp1 <-- source & mask
1008  i_beq(tmp1, 0, rshift_8_a_8th_end);
1009 
1010  i_lui(tmp2, 0x100); // 0x4000
1011  i_beq(tmp1, tmp2, rshift_8_a_8th_masked_1);
1012 
1013  i_lui(tmp2, 0x200); // 0x8000
1014  i_beq(tmp1, tmp2, rshift_8_a_8th_masked_2);
1015 
1016  i_lui(tmp1, 0x3); // 0xC0
1017  i_add(result_a, result_a, tmp1); // if masked 3
1018  x_branch(rshift_8_a_8th_end);
1019 
1020  rshift_8_a_8th_masked_2:
1021  i_lui(tmp1, 0x2); // 0x80
1022  i_add(result_a, result_a, tmp1);
1023  x_branch(rshift_8_a_8th_end);
1024 
1025  rshift_8_a_8th_masked_1:
1026  i_lui(tmp1, 0x1); // 0x40
1027  i_add(result_a, result_a, tmp1);
1028 
1029  rshift_8_a_8th_end:
1030 
1031 
1032  // 8th figure of b in base 4
1033  x_and_to(tmp1, b, tmp3); // tmp1 <-- source & mask
1034  i_beq(tmp1, 0, rshift_8_b_8th_end);
1035 
1036  i_lui(tmp2, 0x100); // 0x4000
1037  i_beq(tmp1, tmp2, rshift_8_b_8th_masked_1);
1038 
1039  i_lui(tmp2, 0x200); // 0x8000
1040  i_beq(tmp1, tmp2, rshift_8_b_8th_masked_2);
1041 
1042  i_lui(tmp1, 0x3); // 0xC0
1043  i_add(result_b, result_b, tmp1); // if masked 3
1044  x_branch(rshift_8_b_8th_end);
1045 
1046  rshift_8_b_8th_masked_2:
1047  i_lui(tmp1, 0x2); // 0x80
1048  i_add(result_b, result_b, tmp1);
1049  x_branch(rshift_8_b_8th_end);
1050 
1051  rshift_8_b_8th_masked_1:
1052  i_lui(tmp1, 0x1); // 0x40
1053  i_add(result_b, result_b, tmp1);
1054 
1055  rshift_8_b_8th_end:
1056  return;
1057  }
1058 
1059 
1060  void
1061  x_rshift_8_signed_to(unsigned int result, unsigned int a,
1062  unsigned int tmp1, unsigned int tmp2,
1063  unsigned int tmp3) {
1064  assert(0 < result);
1065  assert(result < nb_registers);
1066 
1067  assert(0 < a);
1068  assert(a < nb_registers);
1069 
1070  assert(0 < tmp1);
1071  assert(tmp1 < nb_registers);
1072 
1073  assert(0 < tmp2);
1074  assert(tmp2 < nb_registers);
1075 
1076  assert(0 < tmp3);
1077  assert(tmp3 < nb_registers);
1078 
1079  ASSERT_5_DIFFERENT(result, a, tmp1, tmp2, tmp3);
1080 
1081  x_mask0x8000_to(tmp3, a);
1082 
1083  x_rshift_8_to(result, a, tmp1, tmp2);
1084 
1085  i_beq(tmp3, 0, rshift_8_signed_end);
1086  i_lui(tmp1, 0x3FC); // tmp1 <-- 0xFF00
1087  i_add(result, result, tmp1);
1088  rshift_8_signed_end:
1089  return;
1090  }
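 // Example: R[a] == 0xFF00 (-256 as a signed 16-bit value) gives
 // R[result] == 0x00FF + 0xFF00 == 0xFFFF (== -1), i.e. an arithmetic shift.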
1091 
1092 
1093  void
1094  x_set0(unsigned int result) {
1095  assert(0 < result);
1096  assert(result < nb_registers);
1097 
1098  i_add(result, 0, 0);
1099  }
1100 
1101 
1102  void
1103  x_set0x8000(unsigned int a) {
1104  assert(0 < a);
1105  assert(a < nb_registers);
1106 
1107  i_lui(a, 0x200); // = 0x200 << 6
1108  }
1109 
1110 
1111  void
1112  x_sqr(unsigned int a, unsigned int result2,
1113  unsigned int tmp1, unsigned int tmp2, unsigned int tmp3,
1114  unsigned int tmp4, unsigned int tmp5) {
1115  assert(0 < a);
1116  assert(a < nb_registers);
1117 
1118  assert(0 < result2);
1119  assert(result2 < nb_registers);
1120 
1121  assert(0 < tmp1);
1122  assert(tmp1 < nb_registers);
1123 
1124  assert(0 < tmp2);
1125  assert(tmp2 < nb_registers);
1126 
1127  assert(0 < tmp3);
1128  assert(tmp3 < nb_registers);
1129 
1130  assert(0 < tmp4);
1131  assert(tmp4 < nb_registers);
1132 
1133  assert(0 < tmp5);
1134  assert(tmp5 < nb_registers);
1135 
1136  ASSERT_7_DIFFERENT(a, result2, tmp1, tmp2, tmp3, tmp4, tmp5);
1137 
1138  // Split a into 8-bit parts a2:a1 and compute:
1139  // a2:a1 * a2:a1 == (a2*a2 << 16) + (a1*a2 << 9) + a1*a1
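 // For instance, a == 0x0102 (a2 == 0x01, a1 == 0x02):
 // a*a == 66564 == 0x00010404 == (1 << 16) + (2 << 9) + 4.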
1140 
1141  // Memory 0: a1
1142  // 1: a2
1143 
1144  // tmp2 <-- a2 and save
1145  x_rshift_8_to(tmp2, a, tmp1, tmp3);
1146  i_sw(tmp2, 0, 0x1);
1147 
1148  // tmp1 <-- a1 and save
1149  p_movi(tmp3, 0xFF);
1150  x_and_to(tmp1, a, tmp3);
1151  i_sw(tmp1, 0, 0x0);
1152 
1153  // a <-- a1*a1
1154  x_sqr8_to(a, tmp1, tmp3, tmp4, tmp5);
1155 
1156  // result2 <-- a2*a2
1157  x_sqr8_to(result2, tmp2, tmp3, tmp4, tmp5);
1158 
1159  // tmp1 <-- a1 * a2
1160  i_lw(tmp2, 0, 0x0); // reload a1
1161  i_lw(tmp3, 0, 0x1); // reload a2
1162  x_mul8_to(tmp1, tmp2, tmp3, tmp4, tmp5);
1163 
1164  // tmp2:tmp1 <-- (a1 * a2) << 8
1165  x_rshift_8_to(tmp2, tmp1, tmp4, tmp5);
1166  x_lshift_8(tmp1);
1167 
1168  // tmp2:tmp1 <-- (a1 * a2) << 9
1169  x_lshift32(tmp2, tmp1, tmp3);
1170 
1171  // result2:a <-- (a2 * a2):(a1 * a1) + (a1 * a2) << 9
1172  x_add32(result2, a, tmp2, tmp1, tmp3, tmp4, tmp5);
1173 
1174  return;
1175  }
1176 
1177 
1178  void
1179  x_sqr8_to(unsigned int result, unsigned int a,
1180  unsigned int tmp1, unsigned int tmp2, unsigned int tmp3) {
1181  assert(0 < result);
1182  assert(result < nb_registers);
1183 
1184  assert(0 < a);
1185  assert(a < nb_registers);
1186  assert(registers[a] <= 0xFF);
1187 
1188  assert(0 < tmp1);
1189  assert(tmp1 < nb_registers);
1190 
1191  assert(0 < tmp2);
1192  assert(tmp2 < nb_registers);
1193 
1194  assert(0 < tmp3);
1195  assert(tmp3 < nb_registers);
1196 
1197  ASSERT_5_DIFFERENT(result, a, tmp1, tmp2, tmp3);
1198 
1199  // TODO(OPi) Specific algo ?
1200 
1201  x_mov(tmp3, a);
1202 
1203  x_set0(result);
1204  i_addi(tmp2, 0, 0x1); // mask <-- 1
1205 
1206  sqr8_loop:
1207  x_and_to(tmp1, tmp3, tmp2);
1208  i_beq(tmp1, 0, sqr8_not1); // if (current bit of tmp3, the saved copy of a) == 1
1209  i_add(result, result, a);
1210  sqr8_not1:
1211 
1212  x_lshift(a); // a <-- a << 1
1213  x_lshift(tmp2); // mask <-- mask << 1
1214 
1215  // If mask <= 0x80 (== 0b10000000 == 128) then loop
1216  i_lui(tmp1, 0x4); // tmp1 <-- 0x100 == 0b100000000 == 256
1217  i_beq(tmp2, tmp1, sqr8_end);
1218  x_branch(sqr8_loop);
1219  sqr8_end:
1220  return;
1221  }
1222 
1223 
1224  void
1225  x_sub_from(unsigned int a, unsigned int b) {
1226  assert(0 < a);
1227  assert(a < nb_registers);
1228 
1229  assert(0 < b);
1230  assert(b < nb_registers);
1231 
1232  assert(a != b);
1233 
1234  x_neg(a);
1235  i_add(a, a, b);
1236  }
1237 
1238 
1239  void
1240  x_sub_to(unsigned int result, unsigned int a, unsigned int b) {
1241  assert(0 < result);
1242  assert(result < nb_registers);
1243 
1244  assert(0 < a);
1245  assert(a < nb_registers);
1246 
1247  assert(0 < b);
1248  assert(b < nb_registers);
1249 
1250  ASSERT_3_DIFFERENT(result, a, b);
1251 
1252  x_mov(result, b);
1253  x_sub_from(result, a);
1254  }
1255 
1256 
1257  void
1258  x_swap(unsigned int a, unsigned int b, unsigned int tmp) {
1259  assert(0 < a);
1260  assert(a < nb_registers);
1261 
1262  assert(0 < b);
1263  assert(b < nb_registers);
1264 
1265  assert(0 < tmp);
1266  assert(tmp < nb_registers);
1267 
1268  ASSERT_3_DIFFERENT(a, b, tmp);
1269 
1270  x_mov(tmp, a);
1271  x_mov(a, b);
1272  x_mov(b, tmp);
1273  }
1274 } // namespace cpprisc16
void p_movi(unsigned int result, immed_t immed)
(MOV Immediate) R[result] <– immed
Definition: cpprisc16.cpp:166
void x_mask0x8000(unsigned int a, unsigned int tmp)
R[a] <– R[a] & 0x8000 (== R[a] & 0b1000000000000000 == R[a] & 32768)
#define ASSERT_7_DIFFERENT(a, b, c, d, e, f, g)
Check if a, b, c, d, e, f and g are different.
Definition: assert.hpp:98
#define x_branch(label)
Jump to label.
void x_neg(unsigned int a)
R[a] <– -R[a] (two's complement)
void x_not_to(unsigned int result, unsigned int a)
R[result] <– ~R[a].
void x_sqr(unsigned int a, unsigned int result2, unsigned int tmp1, unsigned int tmp2, unsigned int tmp3, unsigned int tmp4, unsigned int tmp5)
R[result2]:R[a] <– R[a]*R[a].
void x_lshift(unsigned int a)
R[a] <– R[a] << 1 (== R[a]*2)
void x_sqr8_to(unsigned int result, unsigned int a, unsigned int tmp1, unsigned int tmp2, unsigned int tmp3)
R[result] <– R[a] * R[a].
void x_mul8_to(unsigned int result, unsigned int a, unsigned int b, unsigned int tmp1, unsigned int tmp2)
R[result] <– R[a] * R[b].
void x_set0x8000(unsigned int a)
R[a] <– 0x8000 (== 0b1000000000000000 == 32768)
void x_inc32(unsigned int a2, unsigned int a1)
R[a2]:R[a1] <– R[a2]:R[a1] + 1.
void i_addi(unsigned int result, unsigned int a, immed_t immed6)
(ADD Immediate) R[result] <– R[a] + immed6
Definition: cpprisc16.cpp:64
void x_not(unsigned int a)
R[a] <– ~R[a].
void x_mov(unsigned int result, unsigned int a)
R[result] <– R[a].
#define ASSERT_5_DIFFERENT(a, b, c, d, e)
Check if a, b, c, d and e are different.
Definition: assert.hpp:52
#define i_beq(a, b, label)
(Branch if EQual) If R[a] == R[b] then jump to label.
Definition: cpprisc16.hpp:30
void i_add(unsigned int result, unsigned int a, unsigned int b)
R[result] <– R[a] + R[b].
Definition: cpprisc16.cpp:51
void x_mul_karatsuba(unsigned int a, unsigned int b, unsigned int tmp1, unsigned int tmp2, unsigned int tmp3, unsigned int tmp4, unsigned int tmp5)
R[b]:R[a] <– R[a] * R[b] by Karatsuba algorithm: https://en.wikipedia.org/wiki/Karatsuba_algorithm.
word16_t registers[8]
Registers.
Definition: cpprisc16.cpp:43
void x_rshift_8_to(unsigned int result, unsigned int a, unsigned int tmp1, unsigned int tmp2)
R[result] <– R[a] >> 8 (== R[a]/256)
void x_and_to(unsigned int result, unsigned int a, unsigned int b)
R[result] <– R[a] & R[b].
void x_add32(unsigned int a2, unsigned int a1, unsigned int b2, unsigned int b1, unsigned int tmp1, unsigned int tmp2, unsigned int tmp3)
R[a2]:R[a1] <– R[a2]:R[a1] + R[b2]:R[b1].
const unsigned int nb_registers
Number of registers: 8 word16_t items.
Definition: cpprisc16.cpp:28
void x_mul(unsigned int a, unsigned int b, unsigned int tmp1, unsigned int tmp2, unsigned int tmp3, unsigned int tmp4, unsigned int tmp5)
R[b]:R[a] <– R[a] * R[b] by standard algorithm: https://en.wikipedia.org/wiki/Multiplication_algorit...
void x_addc(unsigned int a, unsigned int b, unsigned int tmp1, unsigned int tmp2, unsigned int tmp3)
R[b]:R[a] <– R[a] + R[b].
void x_lshift32(unsigned int a2, unsigned int a1, unsigned int tmp)
R[a2]:R[a1] <– (R[a2]:R[a1]) << 1 (== (R[a2]:R[a1])*2)
void i_sw(unsigned int a, unsigned int result, immed_t immed6)
(Store Word) Memory[R[result] + immed6] <– R[a]
Definition: cpprisc16.cpp:133
void x_rshift_8_duo_to(unsigned int result_a, unsigned int a, unsigned int result_b, unsigned int b, unsigned int tmp1, unsigned int tmp2, unsigned int tmp3)
R[result_a] <– R[a] >> 8 (== R[a]/256) and R[result_b] <– R[b] >> 8.
void x_sub_to(unsigned int result, unsigned int a, unsigned int b)
R[result] <– R[a] - R[b].
#define ASSERT_4_DIFFERENT(a, b, c, d)
Check if a, b, c and d are different.
Definition: assert.hpp:37
void x_swap(unsigned int a, unsigned int b, unsigned int tmp)
R[a], R[b] <– R[b], R[a].
void x_lshift32_8(unsigned int a2, unsigned int a1, unsigned int tmp1, unsigned int tmp2, unsigned int tmp3)
R[a2]:R[a1] <– (R[a2]:R[a1]) << 8 (== (R[a2]:R[a1])*256)
void x_lshift_8(unsigned int a)
R[a] <– R[a] << 8 (== R[a]*256)
Helper assertions.
void x_rshift_to(unsigned int result, unsigned int a, unsigned int tmp1, unsigned int tmp2)
R[result] <– R[a] >> 1 (== R[a]/2)
void x_set0(unsigned int result)
R[result] <– 0.
void x_lshift_to(unsigned int result, unsigned int a)
R[result] <– R[a] << 1 (== R[a]*2)
void x_is_lower_to(unsigned int result, unsigned int a, unsigned int b, unsigned int tmp)
R[result] <– (positive value) if R[a] < R[b], 0 otherwise.
void x_sub_from(unsigned int a, unsigned int b)
R[a] <– -R[a] + R[b].
void x_lshift_8_to(unsigned int result, unsigned int a)
R[result] <– R[a] << 8 (== R[a]*256)
void x_or_to(unsigned int result, unsigned int a, unsigned int b)
R[result] <– R[a] | R[b].
Extended instruction set: some extra operations x_* implemented with RiSC16.
void i_nand(unsigned int result, unsigned int a, unsigned int b)
R[result] <– R[a] NAND R[b] (== ~(a & b))
Definition: cpprisc16.cpp:120
void x_mask0x8000_to(unsigned int result, unsigned int a)
R[result] <– R[a] & 0x8000 (== R[a] & 0b1000000000000000 == R[a] & 32768)
void x_rshift_8_signed_to(unsigned int result, unsigned int a, unsigned int tmp1, unsigned int tmp2, unsigned int tmp3)
R[result] <– R[a] >> 8 with extension of the sign.
void i_lw(unsigned int result, unsigned int a, immed_t immed6)
(Load Word) R[result] <– Memory[R[a] + immed6]
Definition: cpprisc16.cpp:103
void i_lui(unsigned int result, immed_t immed10)
(Load Upper Immediate) R[result] <– immed10 << 6
Definition: cpprisc16.cpp:91
#define ASSERT_3_DIFFERENT(a, b, c)
Check if a, b and c are different.
Definition: assert.hpp:26