/**
 * \file IfxCpu_IntrinsicsGnuc.h
 * \version iLLD_1_0_0_11_0
 * \copyright Copyright (c) 2012 Infineon Technologies AG. All rights reserved.
 *
 * IMPORTANT NOTICE
 *
 * Infineon Technologies AG (Infineon) is supplying this file for use
 * exclusively with Infineon's microcontroller products. This file can be freely
 * distributed within development tools that are supporting such microcontroller
 * products.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS". NO WARRANTIES, WHETHER EXPRESS, IMPLIED
 * OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE.
 * INFINEON SHALL NOT, IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL,
 * OR CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
 *
 * \defgroup IfxLld_Cpu_Intrinsics_Gnuc Intrinsics for GNU compiler
 * \ingroup IfxLld_Cpu_Intrinsics
 */

#ifndef IFXCPU_INTRINSICSGNUC_H
#define IFXCPU_INTRINSICSGNUC_H

/* old style intrinsics handling for AGENtiX environment */
#if defined(SCTB_EMBEDDED)
# define IFXCPU_INTRINSICSGNUC_USE_MACHINE_INTRINSICS 0
#else
# define IFXCPU_INTRINSICSGNUC_USE_MACHINE_INTRINSICS 1
#endif

/******************************************************************************/
#include "Ifx_Types.h"

#if IFXCPU_INTRINSICSGNUC_USE_MACHINE_INTRINSICS
#include "machine/intrinsics.h"
#endif

/******************************************************************************/
/* *INDENT-OFF* */
#define STRINGIFY(x) #x

/** Function call without return
 */
#define __non_return_call(fun) __asm__ volatile ("ji %0"::"a"(fun))

/** Jump and link
 */
IFX_INLINE void __jump_and_link(void (*fun)(void))
{
    __asm__ volatile ("jli %0"::"a"(fun));
}

/** \defgroup IfxLld_Cpu_Intrinsics_Gnuc_any_type Cross type arithmetic operation
 *
 * Macros compatible with float, fix point, signed integer and unsigned integer
 *
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */
#define __minX(X,Y)              ( ((X) < (Y)) ? (X) : (Y) )
#define __maxX(X,Y)              ( ((X) > (Y)) ? (X) : (Y) )
#define __saturateX(X,Min,Max)   ( __minX(__maxX(X, Min), Max) )
#define __checkrangeX(X,Min,Max) (((X) >= (Min)) && ((X) <= (Max)))
/** \} */
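
/** Usage sketch (illustrative, not part of the original header): clamp a raw
 *  ADC sample to a 12-bit range with the cross-type macros; the function name
 *  and the limits are assumptions for the example.
 * \code
 * sint32 clampAdc(sint32 adcRaw)
 * {
 *     return __saturateX(adcRaw, 0, 4095);   // expands to __minX(__maxX(adcRaw, 0), 4095)
 * }
 * \endcode
 */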

/** \defgroup IfxLld_Cpu_Intrinsics_Gnuc_singed_integer Signed integer operation
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */
#define __saturate(X,Min,Max) ( __min(__max(X, Min), Max) )
/** \} */

/** \defgroup IfxLld_Cpu_Intrinsics_Gnuc_unsinged_integer Unsigned integer operation
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */
#define __saturateu(X,Min,Max) ( __minu(__maxu(X, Min), Max) )
/** \} */

/** \defgroup IfxLld_Cpu_Intrinsics_Gnucmin_max Minimum and Maximum of (sint16) Integers
    These intrinsic functions return the minimum or maximum of a sint32, uint32 or sint16.
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */

/** Return maximum of two integers
 */
IFX_INLINE sint32 __max(sint32 a, sint32 b)
{
    sint32 res;
    __asm__ volatile ("max %0, %1, %2": "=d" (res) : "d" (a), "d" (b));
    return res;
}

/** Return maximum of two sint16
 */
IFX_INLINE sint16 __maxs(sint16 a, sint16 b)
{
    sint32 res;
    __asm__ volatile ("max.h %0, %1, %2": "=d" (res) : "d" (a), "d" (b));
    return res;
}

/** Return maximum of two unsigned integers
 */
IFX_INLINE uint32 __maxu(uint32 a, uint32 b)
{
    uint32 res;
    __asm__ volatile ("max.u %0, %1, %2": "=d" (res) : "d" (a), "d" (b));
    return res;
}

/** Return minimum of two integers
 */
IFX_INLINE sint32 __min(sint32 a, sint32 b)
{
    sint32 res;
    __asm__ volatile ("min %0, %1, %2": "=d" (res) : "d" (a), "d" (b));
    return res;
}

/** Return minimum of two sint16
 */
IFX_INLINE sint16 __mins(sint16 a, sint16 b)
{
    sint16 res;
    __asm__ volatile ("min.h %0, %1, %2": "=d" (res) : "d" (a), "d" (b));
    return res;
}

/** Return minimum of two unsigned integers
 */
IFX_INLINE uint32 __minu(uint32 a, uint32 b)
{
    uint32 res;
    __asm__ volatile ("min.u %0, %1, %2": "=d" (res) : "d" (a), "d" (b));
    return res;
}

/** \} */
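
/** Usage sketch (illustrative): clamp a control error to symmetric limits with
 *  the single-instruction min/max intrinsics; the function name is an
 *  assumption for the example.
 * \code
 * sint32 clampError(sint32 error, sint32 limit)
 * {
 *     return __min(__max(error, -limit), limit);   // same shape as __saturate()
 * }
 * \endcode
 */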

/** \defgroup intrinsicsgnuc_float Floating point operation
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */

#define __sqrf(X)                ((X) * (X))
#define __sqrtf(X)               sqrtf(X)
#define __checkrange(X,Min,Max)  (((X) >= (Min)) && ((X) <= (Max)))

#define __roundf(X)              ((((X) - (sint32)(X)) > 0.5) ? (1 + (sint32)(X)) : ((sint32)(X)))
#define __absf(X)                ( ((X) < 0.0) ? -(X) : (X) )
#define __minf(X,Y)              ( ((X) < (Y)) ? (X) : (Y) )
#define __maxf(X,Y)              ( ((X) > (Y)) ? (X) : (Y) )
#define __saturatef(X,Min,Max)   ( __minf(__maxf(X, Min), Max) )
#define __checkrangef(X,Min,Max) (((X) >= (Min)) && ((X) <= (Max)))

#define __abs_stdreal(X)              ( ((X) > 0.0) ? (X) : -(X) )
#define __min_stdreal(X,Y)            ( ((X) < (Y)) ? (X) : (Y) )
#define __max_stdreal(X,Y)            ( ((X) > (Y)) ? (X) : (Y) )
#define __saturate_stdreal(X,Min,Max) ( __min_stdreal(__max_stdreal(X, Min), Max) )

#define __neqf(X,Y) ( ((X) > (Y)) || ((X) < (Y)) ) /**< X != Y */
#define __leqf(X,Y) ( !((X) > (Y)) )               /**< X <= Y */
#define __geqf(X,Y) ( !((X) < (Y)) )               /**< X >= Y */
/** \} */
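
/** Usage sketch (illustrative): a tolerance test built from the comparison
 *  macros above, avoiding a direct == on floats; the function name and epsilon
 *  handling are assumptions, boolean/float32 come from Ifx_Types.h.
 * \code
 * boolean isConverged(float32 current, float32 target, float32 eps)
 * {
 *     return __checkrangef(current, target - eps, target + eps);
 * }
 * \endcode
 */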

/** \defgroup IfxLld_Cpu_Intrinsics_Gnucfractional Fractional Arithmetic Support
    The next table provides an overview of intrinsic functions to convert fractional values. Note that the
    TASKING VX-toolset C compiler for TriCore fully supports the fractional type so normally you should not
    need these intrinsic functions (except for __mulfractlong). For compatibility reasons the TASKING C
    compiler does support these functions.
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */

/** Count the consecutive number of bits that have the same value as bit 15 of an sfract
 */
IFX_INLINE sint16 __clssf(sfract a)
{
    sint16 res;
    __asm__ volatile ("cls %0,%1":"=d"(res):"d"(a):"memory");
    return res;
}

/** Convert fract to float
 */
IFX_INLINE float __fract_to_float(fract a)
{
    float res;
    __asm__ volatile ("q31tof %0,%1,%2":"=d"(res):"d"(a), "d"(0):"memory");
    return res;
}

/** Convert float to fract
 */
IFX_INLINE fract __float_to_fract(float a)
{
    fract res;
    __asm__ volatile ("ftoq31 %0,%1,%2":"=d"(res):"d"(a), "d"(0):"memory");
    return res;
}

/** Convert laccum to fract
 */
IFX_INLINE fract __getfract(laccum a)
{
    fract res;
    __asm__ volatile ("dextr %0,%H1,%L1,0x11":"=&d" (res):"d" (a):"memory");
    return res;
}

/** Multiply-add with rounding. Returns the rounded result of ( a + b * c )
 */
IFX_INLINE sfract __mac_r_sf(sfract a, sfract b, sfract c)
{
    sfract res;
    __asm__ volatile ("maddrs.q %0,%1,%2U,%3U,1":"=d"(res):"d"(a), "d"(b), "d"(c):"memory");
    return res;
}

/** Multiply-add sfract. Returns ( a + b * c )
 */
IFX_INLINE sfract __mac_sf(sfract a, sfract b, sfract c)
{
    sfract res;
    __asm__ volatile ("madds.q %0,%1,%2U,%3U,1":"=d"(res):"d"(a), "d"(b), "d"(c):"memory");
    return res;
}

/** Integer part of the multiplication of a fract and a fract
 */
IFX_INLINE long __mulfractfract(fract a, fract b)
{
    long res;
    __asm__ volatile ("mul.q %0,%1,%2,1":"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Integer part of the multiplication of a fract and a long
 */
IFX_INLINE long __mulfractlong(fract a, long b)
{
    long res;
    __asm__ volatile ("mul.q %0,%1,%2,1":"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Convert fract to sfract
 */
IFX_INLINE sfract __round16(fract a)
{
    sfract res;
    __asm__ volatile ("mov.u %0,0x8000 \n\
                       adds %0,%1 \n\
                       insert %0,%0,0,0,0x10 "
                      :"=&d"(res):"d"(a):"memory");
    return res;
}

/** Convert signed sint16 to sfract
 */
IFX_INLINE sfract __s16_to_sfract(sint16 a)
{
    sfract res;
    __asm__ volatile ("sh %0,%1,16":"=d"(res):"d"(a):"memory");
    return res;
}

/** Convert sfract to sint16
 */
IFX_INLINE sint16 __sfract_to_s16(sfract a)
{
    sint16 res;
    __asm__ volatile ("sh %0,%1,-16":"=d"(res):"d"(a):"memory");
    return res;
}

/** Convert sfract to uint16
 */
IFX_INLINE uint16 __sfract_to_u16(sfract a)
{
    uint16 res;
    __asm__ volatile ("sh %0,%1,-16":"=d"(res):"d"(a):"memory");
    return res;
}

/** Left/right shift of a laccum
 */
IFX_INLINE laccum __shaaccum(laccum a, sint32 b)
{
    laccum res;
    __asm__ volatile ("jge %2,0,0f \n\
                       sha %H0,%H1,%2 \n\
                       rsub %2,%2,0 \n\
                       dextr %L0,%H1,%L1,%2 \n\
                       j 1f \n\
                       0:dextr %H0,%H1,%L1,%2 \n\
                       sha %L0,%L1,%2 \n\
                       1:"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Left/right shift of a fract
 */
IFX_INLINE fract __shafracts(fract a, sint32 b)
{
    fract res;
    __asm__ volatile ("shas %0,%1,%2":"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Left/right shift of an sfract
 */
IFX_INLINE sfract __shasfracts(sfract a, sint32 b)
{
    sfract res;
    __asm__ volatile ("shas %0,%1,%2":"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Convert uint16 to sfract
 */
IFX_INLINE sfract __u16_to_sfract(uint16 a)
{
    sfract res;
    __asm__ volatile ("sh %0,%1,16":"=d"(res):"d"(a):"memory");
    return res;
}

/** \} */
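
/** Usage sketch (illustrative): y = a + gain * c in Q15 with the sfract
 *  helpers above. The 0.5 gain, the function name, and the convention that
 *  the raw sint16 bit patterns are treated as Q15 are assumptions made for
 *  this example.
 * \code
 * sint16 mixQ15(sint16 a, sint16 c)
 * {
 *     sfract acc  = __s16_to_sfract(a);
 *     sfract gain = __round16(__float_to_fract(0.5f));          // 0.5 in Q15
 *     sfract res  = __mac_sf(acc, gain, __s16_to_sfract(c));    // acc + gain * c
 *     return __sfract_to_s16(res);
 * }
 * \endcode
 */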

/** \defgroup IfxLld_Cpu_Intrinsics_Gnucinsert Insert / Extract Bit-fields and Bits
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */

/** Extract a bit-field (bit pos to bit pos+width) from value
 */
IFX_INLINE sint32 __extr(sint32 a, uint32 p, uint32 w)
{
    sint32 res;
    __asm__ volatile ("mov %%d14,%2 \n\
                       mov %%d15,%3 \n\
                       extr %0,%1,%%e14"
                      : "=d" (res) : "d" (a), "d" (p), "d" (w):"d14", "d15");
    return res;
}

/** Same as __extr() but return bit-field as unsigned integer
 */
IFX_INLINE uint32 __extru(uint32 a, uint32 p, uint32 w)
{
    uint32 res;
    __asm__ volatile ("mov %%d14,%2 \n\
                       mov %%d15,%3 \n\
                       extr.u %0,%1,%%e14"
                      : "=d" (res) : "d" (a), "d" (p), "d" (w):"d14", "d15");
    return res;
}

/** Load a single bit.
 */
//#define __getbit(address,bitoffset ) __extru( *(address), bitoffset, 1 )
#define __getbit(address, bitoffset) ((*(address) & (1U << (bitoffset))) != 0)

/** Atomic load-modify-store.
 */
#define __imaskldmst(address, value, bitoffset, bits) \
    {long long tmp;\
     __asm__("imask %A0,%1,%2,%3"\
             :"=d"((long long)tmp)\
             :"d"(value),"d"(bitoffset),"i"(bits): "memory");\
     __asm__("ldmst [%0]0,%A1"::"a"(address),"d"(tmp): "memory");}

/** Return trg but replace trgbit by srcbit in src.
 */
IFX_INLINE sint32 __ins(sint32 trg, const sint32 trgbit, sint32 src, const sint32 srcbit)
{
    sint32 res;
    __asm__ volatile ("ins.t %0,%1,%2,%3,%4":"=d"(res):"d"(trg), "i"(trgbit), "d"(src), "i"(srcbit));
    return res;
}

/** Extract bit-field (width bits starting at bit 0) from src and insert it in trg at pos.
 */
IFX_INLINE sint32 __insert(sint32 a, sint32 b, sint32 p, const sint32 w)
{
    sint32 res;
    __asm__ volatile ("mov %%d14,%3 \n\
                       mov %%d15,%4 \n\
                       insert %0,%1,%2,%%e14"
                      :"=d"(res):"d"(a), "d"(b), "d"(p), "d"(w):"d14", "d15");
    return res;
}

/** Return trg but replace trgbit by inverse of srcbit in src.
 */
IFX_INLINE sint32 __insn(sint32 trg, const sint32 trgbit, sint32 src, const sint32 srcbit)
{
    sint32 res;
    __asm__ volatile ("insn.t %0,%1,%2,%3,%4":"=d"(res):"d"(trg), "i"(trgbit), "d"(src), "i"(srcbit));
    return res;
}

/** Store a single bit.
 */
#define __putbit(value,address,bitoffset ) __imaskldmst(address, value, bitoffset,1)

/** \} */
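
/** Usage sketch (illustrative): read a 4-bit field and set a single bit
 *  atomically; `status` and the bit positions are assumptions for the example.
 * \code
 * void bitfieldDemo(uint32* status)
 * {
 *     uint32 mode = __extru(*status, 4, 4);   // bits [7:4] as an unsigned field
 *     __putbit(1, status, 0);                 // set bit 0 via IMASK/LDMST
 *     (void)mode;
 * }
 * \endcode
 */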

/** \defgroup IfxLld_Cpu_Intrinsics_Gnucinterrupt_handling Interrupt Handling
    The next table provides an overview of the intrinsic functions to read or set interrupt handling.
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */

#if !IFXCPU_INTRINSICSGNUC_USE_MACHINE_INTRINSICS
/** Set CPU priority number [0..255] (or [0..1023] for TriCore 1.6.x) and enable interrupts immediately at function entry
 */
#define __bisr(intlvl) __asm__ volatile ("bisr "#intlvl : : : "memory")
#endif

/** Disable interrupts. Only supported for TriCore1
 */
#define __disable() __asm__ volatile ("disable" : : : "memory")

/** Disable interrupts and return previous interrupt state (enabled or disabled). Directly supported on TriCore 1.6; emulated on TC1.3.1
 */
IFX_INLINE sint32 __disable_and_save(void)
{
    sint32 res;
    __asm__ volatile("disable %0":"=d"(res));
    return res;
}

/** Enable interrupts immediately at function entry
 */
#define __enable() __asm__ volatile ("enable" : : : "memory")

/** Restore interrupt state. Directly supported on TriCore 1.6; emulated on TC1.3.1
 */
IFX_INLINE void __restore(sint32 ie)
{
    __asm__ volatile ("restore %0"::"d"(ie));
}

#if !IFXCPU_INTRINSICSGNUC_USE_MACHINE_INTRINSICS
/** Call a system call function number
 */
#define __syscall(svcno) __tric_syscall(svcno)
#define __tric_syscall(svcno) __asm__ volatile ("syscall "STRINGIFY(svcno) : : : "memory")
#endif

/** \} */
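
/** Usage sketch (illustrative): a short critical section. The
 *  __disable_and_save()/__restore() pair nests safely even if the caller
 *  already runs with interrupts disabled; `sharedCounter` is an assumed
 *  variable for the example.
 * \code
 * volatile uint32 sharedCounter;
 *
 * void incrementShared(void)
 * {
 *     sint32 ie = __disable_and_save();   // disable interrupts, remember old state
 *     sharedCounter++;                    // protected read-modify-write
 *     __restore(ie);                      // re-enable only if previously enabled
 * }
 * \endcode
 */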

/** \defgroup IfxLld_Cpu_Intrinsics_Gnucmiscellaneous Miscellaneous Intrinsic Functions
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */

/** Write back and invalidate cache address "p". Generates CACHEA.WI [Ab].
 */
IFX_INLINE void __cacheawi(uint8* p)
{
    __asm__ volatile("cachea.wi [%0]0"::"a"(p));
}

/** Write back and invalidate cache index "p". Generates CACHEI.WI [Ab].
 */
IFX_INLINE void __cacheiwi(uint8* p)
{
    __asm__ volatile("cachei.wi [%0]0"::"a"(p));
}

/** Write back and invalidate cache address "p" and return post incremented
 * value of "p". Generates CACHEA.WI [Ab+].
 */
IFX_INLINE uint8* __cacheawi_bo_post_inc(uint8* p)
{
    __asm__ volatile("cachea.wi [%0+]0"::"a"(p));
    return p;
}

/** Multiply two 32-bit numbers to an intermediate 64-bit result, and scale
 * back the result to 32 bits. To scale back the result, 32 bits are extracted
 * from the intermediate 64-bit result: bit 63-offset to bit 31-offset.
 */
IFX_INLINE sint32 __mulsc(sint32 a, sint32 b, sint32 offset)
{
    sint32 res;
    __asm__ volatile("mul %%e12,%1,%2 \n\
                      dextr %0,%%d13,%%d12,%3"
                     :"=d"(res):"d"(a), "d"(b), "d"(offset):"d12", "d13");
    return res;
}

/** Rotate operand left count times. The bits that are shifted out are inserted at the right side (bit 31 is shifted to bit 0).
 */
IFX_INLINE uint32 __rol(uint32 operand, uint32 count)
{
    uint32 res;
    __asm__ volatile("dextr %0,%1,%1,%2":"=d"(res):"d"(operand), "d"(count):"memory");
    return res;
}

/** Rotate operand right count times. The bits that are shifted out are inserted at the left side (bit 0 is shifted to bit 31).
 */
IFX_INLINE uint32 __ror(uint32 operand, uint32 count)
{
    uint32 res;
    __asm__ volatile("rsub %2,%2,0 \n\
                      dextr %0,%1,%1,%2"
                     :"=d"(res):"d"(operand), "d"(count):"memory");
    return res;
}
/** \} */
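
/** Usage sketch (illustrative): halfword swap built on the rotate intrinsic;
 *  the helper name is an assumption for the example.
 * \code
 * uint32 swapHalfwords(uint32 x)
 * {
 *     return __ror(x, 16);   // rotate by 16: 0xAABBCCDD -> 0xCCDDAABB
 * }
 * \endcode
 */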

/** \defgroup IfxLld_Cpu_Intrinsics_Gnucpacked Packed Data Type Support
    The next table provides an overview of the intrinsic functions for initialization of packed data type.
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */

/** Absolute value of __packb
 */
IFX_INLINE __packb __absb(__packb a)
{
    __packb res;
    __asm__ volatile ("abs.b %0,%1"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Absolute value of __packhw
 */
IFX_INLINE __packhw __absh(__packhw a)
{
    __packhw res;
    __asm__ volatile ("abs.h %0,%1"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Absolute value of __packhw using saturation
 */
IFX_INLINE __packhw __abssh(__packhw a)
{
    __packhw res;
    __asm__ volatile ("abss.h %0,%1"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract first byte from a __packb
 */
IFX_INLINE sint8 __extractbyte1(__packb a)
{
    sint8 res;
    __asm__ volatile ("extr %0,%1,0,8"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract second byte from a __packb
 */
IFX_INLINE sint8 __extractbyte2(__packb a)
{
    sint8 res;
    __asm__ volatile ("extr %0,%1,8,8"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract third byte from a __packb
 */
IFX_INLINE sint8 __extractbyte3(__packb a)
{
    sint8 res;
    __asm__ volatile ("extr %0,%1,16,8"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract fourth byte from a __packb
 */
IFX_INLINE sint8 __extractbyte4(__packb a)
{
    sint8 res;
    __asm__ volatile ("extr %0,%1,24,8"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract first sint16 from a __packhw
 */
IFX_INLINE sint16 __extracthw1(__packhw a)
{
    sint16 res;
    __asm__ volatile ("extr %0,%1,0,16"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract second sint16 from a __packhw
 */
IFX_INLINE sint16 __extracthw2(__packhw a)
{
    sint16 res;
    __asm__ volatile ("extr %0,%1,16,16"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract first uint8 from a __upackb
 */
IFX_INLINE uint8 __extractubyte1(__upackb a)
{
    uint8 res;
    __asm__ volatile ("extr %0,%1,0,8"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract second uint8 from a __upackb
 */
IFX_INLINE uint8 __extractubyte2(__upackb a)
{
    uint8 res;
    __asm__ volatile ("extr %0,%1,8,8"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract third uint8 from a __upackb
 */
IFX_INLINE uint8 __extractubyte3(__upackb a)
{
    uint8 res;
    __asm__ volatile ("extr %0,%1,16,8"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract fourth uint8 from a __upackb
 */
IFX_INLINE uint8 __extractubyte4(__upackb a)
{
    uint8 res;
    __asm__ volatile ("extr %0,%1,24,8"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract first uint16 from a __upackhw
 */
IFX_INLINE uint16 __extractuhw1(__upackhw a)
{
    uint16 res;
    __asm__ volatile ("extr %0,%1,0,16"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract second uint16 from a __upackhw
 */
IFX_INLINE uint16 __extractuhw2(__upackhw a)
{
    uint16 res;
    __asm__ volatile ("extr %0,%1,16,16"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract first byte from a __packb
 */
IFX_INLINE sint8 __getbyte1(__packb* a)
{
    sint8 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,0,8"
                      :"=d"(res):"a"(a):"memory");
    return res;
}

/** Extract second byte from a __packb
 */
IFX_INLINE sint8 __getbyte2(__packb* a)
{
    sint8 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,8,8"
                      :"=d"(res):"a"(a):"memory");
    return res;
}

/** Extract third byte from a __packb
 */
IFX_INLINE sint8 __getbyte3(__packb* a)
{
    sint8 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,16,8"
                      :"=d"(res):"a"(a):"memory");
    return res;
}

/** Extract fourth byte from a __packb
 */
IFX_INLINE sint8 __getbyte4(__packb* a)
{
    sint8 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,24,8"
                      :"=d"(res):"a"(a):"memory");
    return res;
}

/** Extract first sint16 from a __packhw
 */
IFX_INLINE sint16 __gethw1(__packhw* a)
{
    sint16 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,0,16"
                      :"=d"(res):"a"(a):"memory");
    return res;
}

/** Extract second sint16 from a __packhw
 */
IFX_INLINE sint16 __gethw2(__packhw* a)
{
    sint16 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,16,16"
                      :"=d"(res):"a"(a):"memory");
    return res;
}

/** Extract first uint8 from a __upackb
 */
IFX_INLINE uint8 __getubyte1(__upackb* a)
{
    uint8 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,0,8"
                      :"=d"(res):"a"(a):"memory");
    return res;
}

/** Extract second uint8 from a __upackb
 */
IFX_INLINE uint8 __getubyte2(__upackb* a)
{
    uint8 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,8,8"
                      :"=d"(res):"a"(a):"memory");
    return res;
}

/** Extract third uint8 from a __upackb
 */
IFX_INLINE uint8 __getubyte3(__upackb* a)
{
    uint8 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,16,8"
                      :"=d"(res):"a"(a):"memory");
    return res;
}

/** Extract fourth uint8 from a __upackb
 */
IFX_INLINE uint8 __getubyte4(__upackb* a)
{
    uint8 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,24,8"
                      :"=d"(res):"a"(a):"memory");
    return res;
}

/** Extract first uint16 from a __upackhw
 */
IFX_INLINE uint16 __getuhw1(__upackhw* a)
{
    uint16 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,0,16"
                      :"=d"(res):"a"(a):"memory");
    return res;
}

/** Extract second uint16 from a __upackhw
 */
IFX_INLINE uint16 __getuhw2(__upackhw* a)
{
    uint16 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,16,16"
                      :"=d"(res):"a"(a):"memory");
    return res;
}

/** Initialize __packb with four integers
 */
IFX_INLINE __packb __initpackb(sint32 a, sint32 b, sint32 c, sint32 d)
{
    __packb res;
    __asm__ volatile ("insert %3,%3,%4,8,8 \n\
                       insert %4,%1,%2,8,8 \n\
                       insert %0,%4,%3,16,16 "
                      :"=d"(res):"d"(a), "d"(b), "d"(c), "d"(d):"memory");
    return res;
}

/** Initialize __packb with a long integer
 */
IFX_INLINE __packb __initpackbl(long a)
{
    return (__packb) a;
}

/** Initialize __packhw with two integers
 */
IFX_INLINE __packhw __initpackhw(sint16 a, sint16 b)
{
    __packhw res;
    __asm__ volatile ("insert %0,%1,%2,16,16"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Initialize __packhw with a long integer
 */
IFX_INLINE __packhw __initpackhwl(long a)
{
    return a;
}

/** Initialize __upackb with four unsigned integers
 */
IFX_INLINE __upackb __initupackb(uint32 a, uint32 b, uint32 c, uint32 d)
{
    __upackb res;
    __asm__ volatile ("insert %3,%3,%4,8,8 \n\
                       insert %1,%1,%2,8,8 \n\
                       insert %0,%1,%3,16,16"
                      :"=d"(res):"d"(a), "d"(b), "d"(c), "d"(d):"memory");
    return res;
}

/** Initialize __upackhw with two unsigned integers
 */
IFX_INLINE __upackhw __initupackhw(uint16 a, uint16 b)
{
    __upackhw res;
    __asm__ volatile ("insert %0,%1,%2,16,16"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert sint8 into first byte of a __packb
 */
IFX_INLINE __packb __insertbyte1(__packb a, sint8 b)
{
    __packb res;
    __asm__ volatile ("insert %0,%1,%2,0,8"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert sint8 into second byte of a __packb
 */
IFX_INLINE __packb __insertbyte2(__packb a, sint8 b)
{
    __packb res;
    __asm__ volatile ("insert %0,%1,%2,8,8"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert sint8 into third byte of a __packb
 */
IFX_INLINE __packb __insertbyte3(__packb a, sint8 b)
{
    __packb res;
    __asm__ volatile ("insert %0,%1,%2,16,8"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert sint8 into fourth byte of a __packb
 */
IFX_INLINE __packb __insertbyte4(__packb a, sint8 b)
{
    __packb res;
    __asm__ volatile ("insert %0,%1,%2,24,8"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert uint8 into first byte of a __upackb
 */
IFX_INLINE __upackb __insertubyte1(__upackb a, uint8 b)
{
    __upackb res;
    __asm__ volatile ("insert %0,%1,%2,0,8"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert uint8 into second byte of a __upackb
 */
IFX_INLINE __upackb __insertubyte2(__upackb a, uint8 b)
{
    __upackb res;
    __asm__ volatile ("insert %0,%1,%2,8,8"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert uint8 into third byte of a __upackb
 */
IFX_INLINE __upackb __insertubyte3(__upackb a, uint8 b)
{
    __upackb res;
    __asm__ volatile ("insert %0,%1,%2,16,8"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert uint8 into fourth byte of a __upackb
 */
IFX_INLINE __upackb __insertubyte4(__upackb a, uint8 b)
{
    __upackb res;
    __asm__ volatile ("insert %0,%1,%2,24,8"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert sint16 into first halfword of a __packhw
 */
IFX_INLINE __packhw __inserthw1(__packhw a, sint16 b)
{
    __packhw res;
    __asm__ volatile ("insert %0,%1,%2,0,16"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert sint16 into second halfword of a __packhw
 */
IFX_INLINE __packhw __inserthw2(__packhw a, sint16 b)
{
    __packhw res;
    __asm__ volatile ("insert %0,%1,%2,16,16"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert uint16 into first halfword of a __upackhw
 */
IFX_INLINE __upackhw __insertuhw1(__upackhw a, uint16 b)
{
    __upackhw res;
    __asm__ volatile ("insert %0,%1,%2,0,16"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert uint16 into second halfword of a __upackhw
 */
IFX_INLINE __upackhw __insertuhw2(__upackhw a, uint16 b)
{
    __upackhw res;
    __asm__ volatile ("insert %0,%1,%2,16,16"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Minimum of two __packb values
 */
IFX_INLINE __packb __minb(__packb a, __packb b)
{
    __packb res;
    __asm__ volatile ("min.b %0,%1,%2"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Minimum of two __upackb values
 */
IFX_INLINE __upackb __minbu(__upackb a, __upackb b)
{
    __upackb res;
    __asm__ volatile ("min.bu %0,%1,%2"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Minimum of two __packhw values
 */
IFX_INLINE __packhw __minh(__packhw a, __packhw b)
{
    __packhw res;
    __asm__ volatile ("min.h %0,%1,%2"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Minimum of two __upackhw values
 */
IFX_INLINE __upackhw __minhu(__upackhw a, __upackhw b)
{
    __upackhw res;
    __asm__ volatile ("min.hu %0,%1,%2"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert sint8 into first byte of a __packb
 */
IFX_INLINE void __setbyte1(__packb* a, sint8 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,0,8 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** Insert sint8 into second byte of a __packb
 */
IFX_INLINE void __setbyte2(__packb* a, sint8 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,8,8 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** Insert sint8 into third byte of a __packb
 */
IFX_INLINE void __setbyte3(__packb* a, sint8 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,16,8 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** Insert sint8 into fourth byte of a __packb
 */
IFX_INLINE void __setbyte4(__packb* a, sint8 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,24,8 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** Insert sint16 into first halfword of a __packhw
 */
IFX_INLINE void __sethw1(__packhw* a, sint16 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,0,16 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** Insert sint16 into second halfword of a __packhw
 */
IFX_INLINE void __sethw2(__packhw* a, sint16 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,16,16 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** Insert uint8 into first byte of a __upackb
 */
IFX_INLINE void __setubyte1(__upackb* a, uint8 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,0,8 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** Insert uint8 into second byte of a __upackb
 */
IFX_INLINE void __setubyte2(__upackb* a, uint8 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,8,8 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** Insert uint8 into third byte of a __upackb
 */
IFX_INLINE void __setubyte3(__upackb* a, uint8 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,16,8 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** Insert uint8 into fourth byte of a __upackb
 */
IFX_INLINE void __setubyte4(__upackb* a, uint8 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,24,8 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** Insert uint16 into first halfword of a __upackhw
 */
IFX_INLINE void __setuhw1(__upackhw* a, uint16 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,0,16 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** Insert uint16 into second halfword of a __upackhw
 */
IFX_INLINE void __setuhw2(__upackhw* a, uint16 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,16,16 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** \} */
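
/** Usage sketch (illustrative): pack four signed bytes, take the per-lane
 *  absolute value, then read one lane back. The lane order (a = byte 1
 *  through d = byte 4) follows the __initpackb asm above; values and the
 *  function name are assumptions for the example.
 * \code
 * void packedDemo(void)
 * {
 *     __packb v   = __initpackb(-1, 2, -3, 4);
 *     __packb mag = __absb(v);               // per-byte absolute value
 *     sint8   b3  = __extractbyte3(mag);     // third lane -> 3
 *     (void)b3;
 * }
 * \endcode
 */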

/** \defgroup IfxLld_Cpu_Intrinsics_Gnucregister Register Handling
    The next table provides an overview of the intrinsic functions that you can use to access control registers.
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */

/** Return absolute value
 */
#define __abs(a) __builtin_abs(a)

/** Return absolute difference of two integers
 */
IFX_INLINE sint32 __absdif(sint32 a, sint32 b)
{
    sint32 res;
    __asm__ volatile ("absdif %0, %1, %2": "=d" (res) : "d" (a), "d" (b));
    return res;
}

/** Return absolute value with saturation
 */
IFX_INLINE sint32 __abss(sint32 a)
{
    sint32 res;
    __asm__ volatile ("abss %0, %1": "=d" (res) : "d" (a));
    return res;
}

/** Count leading ones in int
 */
IFX_INLINE sint32 __clo(sint32 a)
{
    sint32 res;
    __asm__ volatile ("clo %0,%1":"=d"(res):"d"(a));
    return res;
}

/** Count number of redundant sign bits (all consecutive bits with the same value as bit 31)
 */
IFX_INLINE sint32 __cls(sint32 a)
{
    sint32 res;
    __asm__ volatile ("cls %0,%1":"=d"(res):"d"(a));
    return res;
}

/** Count leading zeros in int
 */
#define __clz(a) __builtin_clz(a)

/** Return absolute double precision floating-point value
 */
IFX_INLINE double __fabs(double d)
{
    double res;
    __asm__ volatile ("insert %0,%1,0,31,1": "=d" (res) : "d" (d):"memory");
    return res;
}

/** Return absolute floating-point value
 */
IFX_INLINE float __fabsf(float f)
{
    float res;
    __asm__ volatile ("insert %0,%1,0,31,1": "=d" (res) : "d" (f):"memory");
    return res;
}

#if !IFXCPU_INTRINSICSGNUC_USE_MACHINE_INTRINSICS
/** Move contents of the addressed core SFR into a data register
 */
#define __mfcr(regaddr) \
    ({ sint32 res; __asm__ volatile ("mfcr %0,%1": "=d" (res) :"i"(regaddr): "memory"); res; })

//({ sint32 res; __asm__ volatile ("mfcr %0,"#regaddr : "=d" (res) : : "memory"); res; })

/** Move contents of a data register (second int) to the addressed core SFR (first int)
 */
#define __mtcr(regaddr,val) __asm__ volatile ("mtcr %0,%1\n\tisync"::"i"(regaddr),"d"(val):"memory")
#endif

/** Return parity
 */
IFX_INLINE sint32 __parity(sint32 a)
{
    sint32 res;
    __asm__ volatile ("parity %0,%1": "=d" (res) : "d" (a):"memory");
    return res;
}

/** Return saturated byte
 */
IFX_INLINE sint8 __satb(sint32 a)
{
    sint8 res;
    __asm__ volatile ("sat.b %0,%1":"=d"(res):"d"(a));
    return res;
}

/** Return saturated uint8
 */
IFX_INLINE uint8 __satbu(sint32 a)
{
    uint8 res;
    __asm__ volatile ("sat.bu %0,%1":"=d"(res):"d"(a));
    return res;
}

/** Return saturated halfword
 */
IFX_INLINE sint16 __sath(sint32 a)
{
    sint16 res;
    __asm__ volatile ("sat.h %0,%1":"=d"(res):"d"(a));
    return res;
}

/** Return saturated unsigned halfword
 */
IFX_INLINE uint16 __sathu(sint32 a)
{
    uint16 res;
    __asm__ volatile ("sat.hu %0,%1":"=d"(res):"d"(a));
    return res;
}

/** \} */
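
/** Usage sketch (illustrative): identify the executing core by reading the
 *  CORE_ID CSFR with __mfcr. The 0xFE1C offset and the 3-bit mask follow the
 *  TriCore architecture manual and are assumptions for this example; with the
 *  default configuration __mfcr comes from machine/intrinsics.h instead of the
 *  macro above.
 * \code
 * sint32 whichCore(void)
 * {
 *     return __mfcr(0xFE1C) & 0x7;   // core index of the executing CPU
 * }
 * \endcode
 */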

/** \defgroup IfxLld_Cpu_Intrinsics_Gnucsaturation Saturation Arithmetic Support
    These intrinsics support saturation arithmetic
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */

/** add signed with saturation
 */
IFX_INLINE sint32 __adds(sint32 a, sint32 b)
{
    sint32 res;
    __asm__ volatile ("adds %0, %1, %2": "=d" (res) : "d" (a), "d" (b));
    return res;
}

/** add unsigned with saturation
 */
IFX_INLINE uint32 __addsu(uint32 a, uint32 b)
{
    uint32 res;
    __asm__ volatile ("adds.u %0, %1, %2": "=d" (res) : "d" (a), "d" (b));
    return res;
}

/** subtract signed with saturation
 */
IFX_INLINE sint32 __subs(sint32 a, sint32 b)
{
    sint32 res;
    __asm__ volatile ("subs %0, %1, %2": "=d" (res) : "d" (a), "d" (b));
    return res;
}

/** subtract unsigned with saturation
 */
IFX_INLINE uint32 __subsu(uint32 a, uint32 b)
{
    uint32 res;
    __asm__ volatile ("subs.u %0, %1, %2": "=d" (res) : "d" (a), "d" (b));
    return res;
}

/** \} */
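
/** Usage sketch (illustrative): accumulate without wrap-around; with __adds
 *  the sum clips at 0x7FFFFFFF instead of overflowing to a negative value.
 *  The function name is an assumption for the example.
 * \code
 * sint32 safeAccumulate(sint32 acc, sint32 sample)
 * {
 *     return __adds(acc, sample);   // 0x7FFFFFFF + 1 -> 0x7FFFFFFF
 * }
 * \endcode
 */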

/** \defgroup IfxLld_Cpu_Intrinsics_Gnucsingle_assembly Insert Single Assembly Instruction
    The next table provides an overview of the intrinsic functions that you can use to insert a single assembly
    instruction. You can also use inline assembly, but these intrinsics provide a shorthand for frequently used
    assembly instructions.
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */

/** Insert DEBUG instruction
 */
IFX_INLINE void __debug(void)
{
    __asm__ volatile ("debug" : : : "memory");
}

/** Insert DSYNC instruction
 */
IFX_INLINE void __dsync(void)
{
    __asm__ volatile ("dsync" : : : "memory");
}

/** Insert ISYNC instruction
 */
IFX_INLINE void __isync(void)
{
    __asm__ volatile ("isync" : : : "memory");
}

/** Insert LDMST instruction. Note that all operands must be word-aligned.
 */
IFX_INLINE void __ldmst(volatile void* address, uint32 mask, uint32 value)
{
    __asm__ volatile("mov %H2,%1 \n\
                      ldmst [%0]0,%A2"
                     ::"a"(address), "d"(mask), "d"((long long)value));
}

/** Insert NOP instruction
 */
IFX_INLINE void __nop(void)
{
    __asm__ volatile ("nop" : : : "memory");
}

/** Insert a loop of cnt NOP instructions
 */
IFX_INLINE void __nops(void* cnt)
{
    __asm__ volatile ("0: nop \n\
                       loop %0,0b"
                      ::"a"(((sint8*)cnt)-1));
}

/** Insert RSLCX instruction
 */
IFX_INLINE void __rslcx(void)
{
    __asm__ volatile ("rslcx" : : : "memory");
}

/** Insert SVLCX instruction
 */
IFX_INLINE void __svlcx(void)
{
    __asm__ volatile ("svlcx" : : : "memory");
}

/** Insert SWAP instruction. Note that all operands must be word-aligned.
 */
IFX_INLINE uint32 __swap(void* place, uint32 value)
{
    uint32 res;
    __asm__ volatile("swap.w [%1]0,%2":"=d"(res):"a"(place), "0"(value));
    return res;
}

/** Insert n NOP instructions
 */
#define NOP(n) __asm(".rept " #n "\n\tnop\n\t.endr\n")

/** \} */
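
/** Usage sketch (illustrative): atomically update bits [11:8] of a
 *  word-aligned cell with __ldmst (memory = (memory & ~mask) | (value & mask));
 *  `reg` and the field position are assumptions for the example.
 * \code
 * void setNibble(volatile uint32* reg, uint32 nibble)
 * {
 *     // mask selects bits 8..11, value carries the new bits already shifted
 *     __ldmst(reg, 0xF00u, (nibble & 0xFu) << 8);
 * }
 * \endcode
 */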

/* FIXME use inline instead of #define */
#define __extru(src,start,size) \
    ({ sint32 res; asm volatile (" extr.u\t %0,%1,%2,%3" : "=d" (res) : \
                                 "d" (src),"i" (start),"i" (size) : "memory"); res; })

/* FIXME use inline instead of #define */
#define __setareg(areg,val) \
    { uint32 reg_val= (uint32)val; \
      asm volatile (" mov.a\t %%"#areg",%0"::"d"(reg_val)); }

/** Stop the performance counters: __mtcr(CPU_CCTRL, 0);
 */
IFX_INLINE void __stopPerfCounters(void)
{
    __asm__ volatile("mov %%d0,0\n\
                      mtcr 0xFC00,%%d0\n\
                      isync\n"
                     : : :"d0"); /* FIXME check that the parameter d0 is understood by the compiler as a register used by the inline */
}

/** \brief This function is an implementation of a binary semaphore using the compare and swap instruction
 * \param address address of the resource.
 * \param value This variable is updated with the status of address
 * \param condition if the value at address matches the value of condition, then value and *address are swapped.
 *
 */
IFX_INLINE unsigned int __cmpAndSwap (unsigned int volatile *address,
                                      unsigned int value, unsigned int condition)
{
    /* Gnu C compiler with TriCore 1.6 support is required to use the cmpswap instruction */
#ifdef IFX_USE_GNUC_TRICORE_1_6
    __extension__ unsigned long long reg64
        = value | (unsigned long long) condition << 32;

    __asm__ __volatile__ ("cmpswap.w [%[addr]]0, %A[reg]"
                          : [reg] "+d" (reg64)
                          : [addr] "a" (address)
                          : "memory");
    return reg64;
#else
    sint32 ie;
    uint32 retval = 1;
    ie = __disable_and_save();
    if (condition == *address)
    {
        __swap((void *)address, value);
        retval = 0;
    }
    __restore(ie);
    return retval;
#endif
}
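
/** Usage sketch (illustrative): a simple spinlock on top of __cmpAndSwap.
 *  With a free value of 0, both implementation paths return 0 exactly when
 *  the swap took place (the CMPSWAP.W path returns the previous content,
 *  the emulated path returns 0 on success); `lock` is an assumed variable.
 * \code
 * volatile unsigned int lock = 0;   // 0 = free, 1 = owned
 *
 * void lockAcquire(void)
 * {
 *     while (__cmpAndSwap(&lock, 1u, 0u) != 0u)
 *     {
 *         // busy-wait until the swap succeeds
 *     }
 * }
 *
 * void lockRelease(void)
 * {
 *     lock = 0u;
 * }
 * \endcode
 */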

/** \brief Convert a fixpoint value to float32
 *
 * This function converts a value from a fixpoint format to a float32 format.
 *
 * \param value value to be converted.
 * \param shift position of the fix point. Range = [-256, 255] => (Qx.y format where x = shift+1).
 *
 * \return Returns the converted value in the float32 format.
 *
 */
IFX_INLINE float32 __fixpoint_to_float32(fract value, sint32 shift)
{
    float32 result;

    __asm__ volatile("q31tof %0, %1, %2": "=d" (result) : "d" (value), "d" (shift));
    return result;
}

/** Return the current return address (register A11)
 */
IFX_INLINE uint32 *__getA11(void)
{
    uint32 *res;
    __asm__ volatile ("mov.aa %0, %%a11": "=a" (res) : :"a11");
    return res;
}

/** Set the stack pointer (register A10)
 */
IFX_INLINE void __setStackPointer(void *stackAddr)
{
    __asm__ volatile ("mov.aa %%a10, %0": : "a" (stackAddr) :"a10");
}

/******************************************************************************/
/* *INDENT-ON* */
#endif /* IFXCPU_INTRINSICSGNUC_H */