diff -rupN gcc/config/avr/avr.c gcc/config/avr/avr.c
--- gcc/config/avr/avr.c 2010-04-02 14:54:46.000000000 -0500
+++ gcc/config/avr/avr.c 2010-09-21 14:31:30.000000000 -0500
@@ -192,6 +192,19 @@ static const struct attribute_spec avr_a
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE avr_can_eliminate
+#undef TARGET_SCALAR_MODE_SUPPORTED_P
+#define TARGET_SCALAR_MODE_SUPPORTED_P avr_scalar_mode_supported_p
+
+/* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
+static bool
+avr_scalar_mode_supported_p (enum machine_mode mode)
+{
+ if (ALL_FIXED_POINT_MODE_P (mode))
+ return true;
+
+ return default_scalar_mode_supported_p (mode);
+}
+
struct gcc_target targetm = TARGET_INITIALIZER;
void
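For context, a minimal usage sketch of what this hook enables (not part of the patch; assumes avr-gcc with this patch applied, GCC's stdfix.h, and the usual AVR type-to-mode mapping, stated here as an assumption):

    #include <stdfix.h>

    /* Once avr_scalar_mode_supported_p accepts the fixed-point modes, the
       ISO TR 18037 types work; their arithmetic lowers to the avr-fixed.md
       patterns and libgcc-fixed.S routines added later in this patch.  */
    short _Fract sf = 0.25hr;   /* QQ mode: s.7   */
    _Fract       f  = 0.25r;    /* HQ mode: s.15  */
    short _Accum sa = 1.5hk;    /* HA mode: s7.8 after avr-modes.def  */
    _Accum       a  = 1.5k;     /* SA mode: s15.16  */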
@@ -1609,9 +1622,9 @@ output_movqi (rtx insn, rtx operands[],
*l = 1;
- if (register_operand (dest, QImode))
+ if (register_operand (dest, VOIDmode))
{
- if (register_operand (src, QImode)) /* mov r,r */
+ if (register_operand (src, VOIDmode)) /* mov r,r */
{
if (test_hard_reg_class (STACK_REG, dest))
return AS2 (out,%0,%1);
@@ -1699,9 +1712,9 @@ output_movhi (rtx insn, rtx operands[],
if (!l)
l = &dummy;
- if (register_operand (dest, HImode))
+ if (register_operand (dest, VOIDmode))
{
- if (register_operand (src, HImode)) /* mov r,r */
+ if (register_operand (src, VOIDmode)) /* mov r,r */
{
if (test_hard_reg_class (STACK_REG, dest))
{
@@ -2424,6 +2437,14 @@ output_movsisf(rtx insn, rtx operands[],
{
if (test_hard_reg_class (LD_REGS, dest)) /* ldi d,i */
{
+ if (AVR_HAVE_MOVW
+ && (UINTVAL (src) >> 16) == (UINTVAL (src) & 0xffff))
+ {
+ *l = 3;
+ return (AS2 (ldi,%A0,lo8(%1)) CR_TAB
+ AS2 (ldi,%B0,hi8(%1)) CR_TAB
+ AS2 (movw,%C0,%A0));
+ }
*l = 4;
return (AS2 (ldi,%A0,lo8(%1)) CR_TAB
AS2 (ldi,%B0,hi8(%1)) CR_TAB
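The new AVR_HAVE_MOVW case above handles 32-bit constants whose high and low 16-bit halves are identical. A hand-written illustration (my sketch, not compiler output), loading 0x12341234 into, say, r22..r25:

    /* new 3-instruction form:         old 4-instruction form:
         ldi r22, 0x34  ; lo8            ldi r22, 0x34
         ldi r23, 0x12  ; hi8            ldi r23, 0x12
         movw r24, r22  ; copy the pair  ldi r24, 0x34
                                         ldi r25, 0x12  */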
@@ -4354,6 +4375,196 @@ avr_rotate_bytes (rtx operands[])
return true;
}
+/* Output the instructions needed for a fixed-point conversion. */
+
+const char *
+fract_out (rtx insn ATTRIBUTE_UNUSED, rtx operands[], int intsigned, int *len)
+{
+ int i, k = 0;
+ int sbit[2], ilen[2], flen[2], tlen[2];
+ int rdest, rsource, offset;
+ int start, end, dir;
+ int hadbst = 0, hadlsl = 0;
+ int clrword = -1, lastclr = 0, clr = 0;
+ char buf[20];
+
+ if (!len)
+ len = &k;
+
+ for (i = 0; i < 2; i++)
+ {
+ enum machine_mode mode = GET_MODE (operands[i]);
+ tlen[i] = GET_MODE_SIZE (mode);
+ if (SCALAR_INT_MODE_P (mode))
+ {
+ sbit[i] = intsigned;
+ ilen[i] = GET_MODE_BITSIZE(mode) / 8;
+ flen[i] = 0;
+ }
+ else if (ALL_SCALAR_FIXED_POINT_MODE_P (mode))
+ {
+ sbit[i] = SIGNED_SCALAR_FIXED_POINT_MODE_P (mode);
+ ilen[i] = (GET_MODE_IBIT (mode) + 1) / 8;
+ flen[i] = (GET_MODE_FBIT (mode) + 1) / 8;
+ }
+ else
+ fatal_insn ("unsupported fixed-point conversion", insn);
+ }
+
+ rdest = true_regnum (operands[0]);
+ rsource = true_regnum (operands[1]);
+ offset = flen[1] - flen[0];
+
+ /* Store the sign bit if the destination is a signed
+ fract and the source has a sign in the integer part. */
+ if (sbit[0] && !ilen[0] && sbit[1] && ilen[1])
+ {
+ /* To avoid using bst and bld if the source and
+ destination registers overlap we can use a single lsl
+ since we don't care about preserving the source register. */
+ if (rdest < rsource + tlen[1] && rdest + tlen[0] > rsource)
+ {
+ sprintf (buf, "lsl r%d", rsource + tlen[1] - 1);
+ hadlsl = 1;
+ }
+ else
+ {
+ sprintf (buf, "bst r%d, 7", rsource + tlen[1] - 1);
+ hadbst = 1;
+ }
+ output_asm_insn (buf, operands);
+ ++*len;
+ }
+
+ /* Pick the correct direction. */
+ if (rdest < rsource + offset)
+ {
+ dir = 1;
+ start = 0;
+ end = tlen[0];
+ }
+ else
+ {
+ dir = -1;
+ start = tlen[0] - 1;
+ end = -1;
+ }
+
+ /* Move registers into place, clearing registers that do not overlap. */
+ for (i = start; i != end; i += dir)
+ {
+ int destloc = rdest + i, sourceloc = rsource + i + offset;
+ if (sourceloc < rsource || sourceloc >= rsource + tlen[1])
+ {
+ if (AVR_HAVE_MOVW && i+dir != end
+ && (sourceloc+dir < rsource || sourceloc+dir >= rsource + tlen[1])
+ && ((dir == 1 && !(destloc%2) && !(sourceloc%2))
+ || (dir == -1 && (destloc%2) && (sourceloc%2)))
+ && clrword != -1)
+ {
+ sprintf (buf, "movw r%d, r%d", destloc&0xfe, clrword&0xfe);
+ i += dir;
+ }
+ else
+ {
+ /* Do not clear the register if it is going to get
+ sign extended with a mov later. */
+ if (sbit[0] && sbit[1] && i != tlen[0] - 1 && i >= flen[0])
+ continue;
+
+ sprintf (buf, "clr r%d", destloc);
+ if (lastclr)
+ clrword = destloc;
+ clr=1;
+ }
+ }
+ else if (destloc == sourceloc)
+ continue;
+ else
+ if (AVR_HAVE_MOVW && i+dir != end
+ && sourceloc+dir >= rsource && sourceloc+dir < rsource + tlen[1]
+ && ((dir == 1 && !(destloc%2) && !(sourceloc%2))
+ || (dir == -1 && (destloc%2) && (sourceloc%2))))
+ {
+ sprintf (buf, "movw r%d, r%d", destloc&0xfe, sourceloc&0xfe);
+ i += dir;
+ }
+ else
+ sprintf (buf, "mov r%d, r%d", destloc, sourceloc);
+
+ output_asm_insn (buf, operands);
+ ++*len;
+
+ lastclr = clr;
+ clr = 0;
+ }
+
+ /* Perform sign extension if needed. */
+ if (sbit[0] && sbit[1] && ilen[0] > ilen[1])
+ {
+ sprintf (buf, "sbrc r%d, 7", rdest+tlen[1]-1-offset);
+ output_asm_insn (buf, operands);
+ sprintf (buf, "com r%d", rdest+tlen[0]-1);
+ output_asm_insn (buf, operands);
+ *len += 2;
+ /* Sign extend additional bytes. */
+ start = rdest + tlen[0] - 2;
+ end = rdest + flen[0] + ilen[1] - 1;
+ for (i = start; i != end; i--)
+ {
+ if (AVR_HAVE_MOVW && i != start && i-1 != end)
+ sprintf (buf, "movw r%d, r%d", --i, rdest+tlen[0]-2);
+ else
+ sprintf (buf, "mov r%d, r%d", i, rdest+tlen[0]-1);
+ output_asm_insn (buf, operands);
+ ++*len;
+ }
+ }
+
+ /* Perform shifts, only needed if one operand
+ is a signed fract, and the other is not. */
+ if (sbit[0] && !ilen[0] && (!sbit[1] || ilen[1]))
+ {
+ start = rdest+flen[0]-1;
+ end = rdest + flen[0] - flen[1];
+ if (end < rdest)
+ end = rdest;
+ for (i = start; i >= end; i--)
+ {
+ if (i == start && !hadlsl)
+ sprintf (buf, "lsr r%d", i);
+ else
+ sprintf (buf, "ror r%d", i);
+ output_asm_insn (buf, operands);
+ ++*len;
+ }
+
+ if (hadbst)
+ {
+ sprintf (buf, "bld r%d, 7", rdest + tlen[0] - 1);
+ output_asm_insn (buf, operands);
+ ++*len;
+ }
+ }
+ else if (sbit[1] && !ilen[1] && (!sbit[0] || ilen[0]))
+ {
+ start = rdest + flen[0] - flen[1];
+ if (start < rdest)
+ start = rdest;
+ for (i = start; i < rdest + flen[0]; i++)
+ {
+ if (i == start)
+ sprintf (buf, "lsl r%d", i);
+ else
+ sprintf (buf, "rol r%d", i);
+ output_asm_insn (buf, operands);
+ ++*len;
+ }
+ }
+
+ return "";
+}
diff -rupN gcc/config/avr/avr-fixed.md gcc/config/avr/avr-fixed.md
--- gcc/config/avr/avr-fixed.md 1969-12-31 18:00:00.000000000 -0600
+++ gcc/config/avr/avr-fixed.md 2010-09-21 14:31:30.000000000 -0500
+(define_mode_iterator ALLQQ [(QQ "") (UQQ "")])
+(define_mode_iterator ALLHQ [(HQ "") (UHQ "")])
+(define_mode_iterator ALLHA [(HA "") (UHA "")])
+(define_mode_iterator ALLHQHA [(HQ "") (UHQ "") (HA "") (UHA "")])
+(define_mode_iterator ALLSA [(SA "") (USA "")])
+
+;;; Conversions
+
+(define_mode_iterator FIXED1 [(QQ "") (UQQ "") (HQ "") (UHQ "")
+ (SQ "") (USQ "") (DQ "") (UDQ "")
+ (HA "") (UHA "") (SA "") (USA "")
+ (DA "") (UDA "") (TA "") (UTA "")
+ (QI "") (HI "") (SI "") (DI "")])
+(define_mode_iterator FIXED2 [(QQ "") (UQQ "") (HQ "") (UHQ "")
+ (SQ "") (USQ "") (DQ "") (UDQ "")
+ (HA "") (UHA "") (SA "") (USA "")
+ (DA "") (UDA "") (TA "") (UTA "")
+ (QI "") (HI "") (SI "") (DI "")])
+
+(define_insn "fract2"
+ [(set (match_operand:FIXED1 0 "register_operand" "=r")
+ (fract_convert:FIXED1 (match_operand:FIXED2 1 "register_operand" "r")))]
+ ""
+ "* return fract_out (insn, operands, 1, NULL);"
+ [(set_attr "cc" "clobber")])
+
+(define_insn "fractuns2"
+ [(set (match_operand:FIXED1 0 "register_operand" "=r")
+ (unsigned_fract_convert:FIXED1 (match_operand:FIXED2 1 "register_operand" "r")))]
+ ""
+ "* return fract_out (insn, operands, 0, NULL);"
+ [(set_attr "cc" "clobber")])
+
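A usage sketch for the two conversion patterns above (hypothetical example code; the concrete pattern names follow the fract<from><to>2 naming and are assumptions):

    #include <stdfix.h>

    _Accum widen (_Fract f, unsigned int u)
    {
      _Accum a = f;   /* fract_convert: fracthqsa2, code emitted by fract_out() */
      a += u;         /* unsigned 16-bit int to SA: presumably fractunshisa2 */
      return a;
    }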
+;;; Addition/Subtraction, mostly identical to integer versions
+
+(define_insn "add3"
+ [(set (match_operand:ALLQQ 0 "register_operand" "=r,d")
+ (plus:ALLQQ (match_operand:ALLQQ 1 "register_operand" "%0,0")
+ (match_operand:ALLQQ 2 "nonmemory_operand" "r,i")))]
+ ""
+ "@
+ add %0,%2
+ subi %0,lo8(-(%2))"
+ [(set_attr "length" "1,1")
+ (set_attr "cc" "set_czn,set_czn")])
+
+(define_insn "sub3"
+ [(set (match_operand:ALLQQ 0 "register_operand" "=r,d")
+ (minus:ALLQQ (match_operand:ALLQQ 1 "register_operand" "0,0")
+ (match_operand:ALLQQ 2 "nonmemory_operand" "r,i")))]
+ ""
+ "@
+ sub %0,%2
+ subi %0,lo8(%2)"
+ [(set_attr "length" "1,1")
+ (set_attr "cc" "set_czn,set_czn")])
+
+
+(define_insn "add3"
+ [(set (match_operand:ALLHQHA 0 "register_operand" "=r,d")
+ (plus:ALLHQHA (match_operand:ALLHQHA 1 "register_operand" "%0,0")
+ (match_operand:ALLHQHA 2 "nonmemory_operand" "r,i")))]
+ ""
+ "@
+ add %A0,%A2\;adc %B0,%B2
+ subi %A0,lo8(-(%2))\;sbci %B0,hi8(-(%2))"
+ [(set_attr "length" "2,2")
+ (set_attr "cc" "set_n,set_czn")])
+
+(define_insn "sub3"
+ [(set (match_operand:ALLHQHA 0 "register_operand" "=r,d")
+ (minus:ALLHQHA (match_operand:ALLHQHA 1 "register_operand" "0,0")
+ (match_operand:ALLHQHA 2 "nonmemory_operand" "r,i")))]
+ ""
+ "@
+ sub %A0,%A2\;sbc %B0,%B2
+ subi %A0,lo8(%2)\;sbci %B0,hi8(%2)"
+ [(set_attr "length" "2,2")
+ (set_attr "cc" "set_czn,set_czn")])
+
+(define_insn "add3"
+ [(set (match_operand:ALLSA 0 "register_operand" "=r,d")
+ (plus:ALLSA (match_operand:ALLSA 1 "register_operand" "%0,0")
+ (match_operand:ALLSA 2 "nonmemory_operand" "r,i")))]
+ ""
+ "@
+ add %A0,%A2\;adc %B0,%B2\;adc %C0,%C2\;adc %D0,%D2
+ subi %0,lo8(-(%2))\;sbci %B0,hi8(-(%2))\;sbci %C0,hlo8(-(%2))\;sbci %D0,hhi8(-(%2))"
+ [(set_attr "length" "4,4")
+ (set_attr "cc" "set_n,set_czn")])
+
+(define_insn "sub3"
+ [(set (match_operand:ALLSA 0 "register_operand" "=r,d")
+ (minus:ALLSA (match_operand:ALLSA 1 "register_operand" "0,0")
+ (match_operand:ALLSA 2 "nonmemory_operand" "r,i")))]
+ ""
+ "@
+ sub %0,%2\;sbc %B0,%B2\;sbc %C0,%C2\;sbc %D0,%D2
+ subi %A0,lo8(%2)\;sbci %B0,hi8(%2)\;sbci %C0,hlo8(%2)\;sbci %D0,hhi8(%2)"
+ [(set_attr "length" "4,4")
+ (set_attr "cc" "set_czn,set_czn")])
+
+;******************************************************************************
+; mul
+
+(define_insn "mulqq3"
+ [(set (match_operand:QQ 0 "register_operand" "=r")
+ (mult:QQ (match_operand:QQ 1 "register_operand" "a")
+ (match_operand:QQ 2 "register_operand" "a")))]
+ "AVR_HAVE_MUL"
+ "fmuls %1,%2\;mov %0,r1\;clr r1"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+(define_insn "muluqq3"
+ [(set (match_operand:UQQ 0 "register_operand" "=r")
+ (mult:UQQ (match_operand:UQQ 1 "register_operand" "r")
+ (match_operand:UQQ 2 "register_operand" "r")))]
+ "AVR_HAVE_MUL"
+ "mul %1,%2\;mov %0,r1\;clr r1"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
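Why one fmuls (or plain mul for UQQ) suffices here, spelled out (my annotation; instruction behavior per the AVR instruction set, and the worked numbers are an example):

    /* QQ is s.7, so the 8x8 signed product is s2.14; fmuls shifts the
       16-bit product left once, leaving s1.15 in r1:r0, and the high
       byte r1 is already the s.7 result.  E.g. 0.5r * 0.5r:
         0x40 * 0x40 = 0x1000, after fmuls 0x2000, r1 = 0x20 = 0.25r  */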
+;; (reg:ALLHQ 20) not clobbered on the enhanced core.
+;; use registers from 16-23 so we can use fmuls
+;; All call-used registers clobbered otherwise - normal library call.
+(define_expand "mul3"
+ [(set (reg:ALLHQ 22) (match_operand:ALLHQ 1 "register_operand" ""))
+ (set (reg:ALLHQ 20) (match_operand:ALLHQ 2 "register_operand" ""))
+ (parallel [(set (reg:ALLHQ 18) (mult:ALLHQ (reg:ALLHQ 22) (reg:ALLHQ 20)))
+ (clobber (reg:ALLHQ 22))])
+ (set (match_operand:ALLHQ 0 "register_operand" "") (reg:ALLHQ 18))]
+ "AVR_HAVE_MUL"
+ "")
+
+(define_insn "*mul3_enh_call"
+ [(set (reg:ALLHQ 18) (mult:ALLHQ (reg:ALLHQ 22) (reg:ALLHQ 20)))
+ (clobber (reg:ALLHQ 22))]
+ "AVR_HAVE_MUL"
+ "%~call __mul3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+; Special handling for devices with and without a hardware multiplier.
+(define_expand "mul3"
+ [(set (reg:ALLHA 22) (match_operand:ALLHA 1 "register_operand" ""))
+ (set (reg:ALLHA 20) (match_operand:ALLHA 2 "register_operand" ""))
+ (parallel [(set (reg:ALLHA 18) (mult:ALLHA (reg:ALLHA 22) (reg:ALLHA 20)))
+ (clobber (reg:ALLHA 22))])
+ (set (match_operand:ALLHA 0 "register_operand" "") (reg:ALLHA 18))]
+ ""
+ "
+{
+ if (!AVR_HAVE_MUL)
+ {
+ emit_insn (gen_mul<mode>3_call (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+(define_insn "*mul3_enh"
+ [(set (reg:ALLHA 18) (mult:ALLHA (reg:ALLHA 22) (reg:ALLHA 20)))
+ (clobber (reg:ALLHA 22))]
+ "AVR_HAVE_MUL"
+ "%~call __mul3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+; Without multiplier, clobbers both inputs, and needs a separate output register
+(define_expand "mul3_call"
+ [(set (reg:ALLHA 24) (match_operand:ALLHA 1 "register_operand" ""))
+ (set (reg:ALLHA 22) (match_operand:ALLHA 2 "register_operand" ""))
+ (parallel [(set (reg:ALLHA 18) (mult:ALLHA (reg:ALLHA 22) (reg:ALLHA 24)))
+ (clobber (reg:ALLHA 22))
+ (clobber (reg:ALLHA 24))])
+ (set (match_operand:ALLHA 0 "register_operand" "") (reg:ALLHA 18))]
+ "!AVR_HAVE_MUL"
+ "")
+
+(define_insn "*mul3_call"
+ [(set (reg:ALLHA 18) (mult:ALLHA (reg:ALLHA 22) (reg:ALLHA 24)))
+ (clobber (reg:ALLHA 22))
+ (clobber (reg:ALLHA 24))]
+ "!AVR_HAVE_MUL"
+ "%~call __mul3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+;; On the enhanced core, don't clobber either input, and use a separate output;
+;; r15 is needed as a zero register since r1 is used for mul.
+(define_expand "mul3"
+ [(set (reg:ALLSA 16) (match_operand:ALLSA 1 "register_operand" ""))
+ (set (reg:ALLSA 20) (match_operand:ALLSA 2 "register_operand" ""))
+ (parallel [(set (reg:ALLSA 24) (mult:ALLSA (reg:ALLSA 16) (reg:ALLSA 20)))
+ (clobber (reg:QI 15))])
+ (set (match_operand:ALLSA 0 "register_operand" "") (reg:ALLSA 24))]
+ ""
+ "
+{
+ if (!AVR_HAVE_MUL)
+ {
+ emit_insn (gen_mul<mode>3_call (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+(define_insn "*mul3_enh"
+ [(set (reg:ALLSA 24) (mult:ALLSA (reg:ALLSA 16) (reg:ALLSA 20)))
+ (clobber (reg:QI 15))]
+ "AVR_HAVE_MUL"
+ "%~call __mul3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+; Without multiplier, clobbers both inputs, needs a separate output, and also
+; needs two more scratch registers
+(define_expand "mul3_call"
+ [(set (reg:ALLSA 18) (match_operand:ALLSA 1 "register_operand" ""))
+ (set (reg:ALLSA 24) (match_operand:ALLSA 2 "register_operand" ""))
+ (parallel [(set (reg:ALLSA 14) (mult:ALLSA (reg:ALLSA 18) (reg:ALLSA 24)))
+ (clobber (reg:ALLSA 18))
+ (clobber (reg:ALLSA 24))
+ (clobber (reg:HI 22))])
+ (set (match_operand:ALLSA 0 "register_operand" "") (reg:ALLSA 14))]
+ "!AVR_HAVE_MUL"
+ "")
+
+(define_insn "*mul3_call"
+ [(set (reg:ALLSA 14) (mult:ALLSA (reg:ALLSA 18) (reg:ALLSA 24)))
+ (clobber (reg:ALLSA 18))
+ (clobber (reg:ALLSA 24))
+ (clobber (reg:HI 22))]
+ "!AVR_HAVE_MUL"
+ "%~call __mul3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+; / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / /
+; div
+
+(define_code_iterator usdiv [udiv div]) ; do signed and unsigned in one shot
+
+(define_expand "3"
+ [(set (reg:ALLQQ 25) (match_operand:ALLQQ 1 "register_operand" ""))
+ (set (reg:ALLQQ 22) (match_operand:ALLQQ 2 "register_operand" ""))
+ (parallel [(set (reg:ALLQQ 24) (usdiv:ALLQQ (reg:ALLQQ 25) (reg:ALLQQ 22)))
+ (clobber (reg:ALLQQ 25))
+ (clobber (reg:QI 23))])
+ (set (match_operand:ALLQQ 0 "register_operand" "") (reg:ALLQQ 24))]
+ ""
+ "")
+
+(define_insn "*3_call"
+ [(set (reg:ALLQQ 24) (usdiv:ALLQQ (reg:ALLQQ 25) (reg:ALLQQ 22)))
+ (clobber (reg:ALLQQ 25))
+ (clobber (reg:QI 23))]
+ ""
+ "%~call __3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_expand "3"
+ [(set (reg:ALLHQHA 26) (match_operand:ALLHQHA 1 "register_operand" ""))
+ (set (reg:ALLHQHA 22) (match_operand:ALLHQHA 2 "register_operand" ""))
+ (parallel [(set (reg:ALLHQHA 24) (usdiv:ALLHQHA (reg:ALLHQHA 26) (reg:ALLHQHA 22)))
+ (clobber (reg:ALLHQHA 26))
+ (clobber (reg:QI 21))])
+ (set (match_operand:ALLHQHA 0 "register_operand" "") (reg:ALLHQHA 24))]
+ ""
+ "")
+
+(define_insn "*3_call"
+ [(set (reg:ALLHQHA 24) (usdiv:ALLHQHA (reg:ALLHQHA 26) (reg:ALLHQHA 22)))
+ (clobber (reg:ALLHQHA 26))
+ (clobber (reg:QI 21))]
+ ""
+ "%~call __3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+; note the first parameter gets passed in already offset by 2 bytes
+(define_expand "3"
+ [(set (reg:ALLSA 24) (match_operand:ALLSA 1 "register_operand" ""))
+ (set (reg:ALLSA 18) (match_operand:ALLSA 2 "register_operand" ""))
+ (parallel [(set (reg:ALLSA 22) (usdiv:ALLSA (reg:ALLSA 24) (reg:ALLSA 18)))
+ (clobber (reg:HI 26))
+ (clobber (reg:HI 30))])
+ (set (match_operand:ALLSA 0 "register_operand" "") (reg:ALLSA 22))]
+ ""
+ "")
+
+(define_insn "*3_call"
+ [(set (reg:ALLSA 22) (usdiv:ALLSA (reg:ALLSA 24) (reg:ALLSA 18)))
+ (clobber (reg:HI 26))
+ (clobber (reg:HI 30))]
+ ""
+ "%~call __3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+
+;; abs must be defined for fixed-point types for correct operation
+
+;; abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x)
+
+;; abs
+
+(define_insn "abs2"
+ [(set (match_operand:ALLQQ 0 "register_operand" "=r")
+ (abs:ALLQQ (match_operand:ALLQQ 1 "register_operand" "0")))]
+ ""
+ "sbrc %0,7
+ neg %0"
+ [(set_attr "length" "2")
+ (set_attr "cc" "clobber")])
diff -rupN gcc/config/avr/avr.md gcc/config/avr/avr.md
--- gcc/config/avr/avr.md 2010-04-02 14:54:46.000000000 -0500
+++ gcc/config/avr/avr.md 2010-09-21 14:33:24.000000000 -0500
@@ -66,6 +66,15 @@
(include "predicates.md")
(include "constraints.md")
+;; Fixed-point instructions.
+(include "avr-fixed.md")
+(define_mode_iterator ALLQ [(QI "") (QQ "") (UQQ "")])
+(define_mode_iterator ALLH [(HI "") (HQ "") (UHQ "") (HA "") (UHA "")])
+(define_mode_iterator ALLS [(SI "") (SA "") (USA "")])
+(define_mode_iterator ALLQS [(QI "") (QQ "") (UQQ "")
+ (HI "") (HQ "") (UHQ "") (HA "") (UHA "")
+ (SI "") (SA "") (USA "")])
+
;; Condition code settings.
(define_attr "cc" "none,set_czn,set_zn,set_n,compare,clobber"
(const_string "none"))
@@ -181,9 +190,9 @@
})
-(define_insn "*pushqi"
- [(set (mem:QI (post_dec (reg:HI REG_SP)))
- (match_operand:QI 0 "reg_or_0_operand" "r,L"))]
+(define_insn "*push"
+ [(set (mem:ALLQ (post_dec (reg:HI REG_SP)))
+ (match_operand:ALLQ 0 "reg_or_0_operand" "r,L"))]
""
"@
push %0
@@ -191,18 +200,18 @@
[(set_attr "length" "1,1")])
-(define_insn "*pushhi"
- [(set (mem:HI (post_dec (reg:HI REG_SP)))
- (match_operand:HI 0 "reg_or_0_operand" "r,L"))]
+(define_insn "*push"
+ [(set (mem:ALLH (post_dec (reg:HI REG_SP)))
+ (match_operand:ALLH 0 "reg_or_0_operand" "r,L"))]
""
"@
push %B0\;push %A0
push __zero_reg__\;push __zero_reg__"
[(set_attr "length" "2,2")])
-(define_insn "*pushsi"
- [(set (mem:SI (post_dec (reg:HI REG_SP)))
- (match_operand:SI 0 "reg_or_0_operand" "r,L"))]
+(define_insn "*push"
+ [(set (mem:ALLS (post_dec (reg:HI REG_SP)))
+ (match_operand:ALLS 0 "reg_or_0_operand" "r,L"))]
""
"@
push %D0\;push %C0\;push %B0\;push %A0
@@ -228,21 +237,21 @@
;; are call-saved registers, and most of LD_REGS are call-used registers,
;; so this may still be a win for registers live across function calls.
-(define_expand "movqi"
- [(set (match_operand:QI 0 "nonimmediate_operand" "")
- (match_operand:QI 1 "general_operand" ""))]
+(define_expand "mov"
+ [(set (match_operand:ALLQ 0 "nonimmediate_operand" "")
+ (match_operand:ALLQ 1 "general_operand" ""))]
""
"/* One of the ops has to be in a register. */
- if (!register_operand(operand0, QImode)
- && ! (register_operand(operand1, QImode) || const0_rtx == operand1))
- operands[1] = copy_to_mode_reg(QImode, operand1);
+ if (!register_operand(operand0, <MODE>mode)
+ && ! (register_operand(operand1, <MODE>mode) || const0_rtx == operand1))
+ operands[1] = copy_to_mode_reg(<MODE>mode, operand1);
")
-(define_insn "*movqi"
- [(set (match_operand:QI 0 "nonimmediate_operand" "=r,d,Qm,r,q,r,*r")
- (match_operand:QI 1 "general_operand" "rL,i,rL,Qm,r,q,i"))]
- "(register_operand (operands[0],QImode)
- || register_operand (operands[1], QImode) || const0_rtx == operands[1])"
+(define_insn "*mov"
+ [(set (match_operand:ALLQ 0 "nonimmediate_operand" "=r,d,Qm,r,q,r,*r")
+ (match_operand:ALLQ 1 "general_operand" "r,i,rL,Qm,r,q,i"))]
+ "(register_operand (operands[0],mode)
+ || register_operand (operands[1], mode) || const0_rtx == operands[1])"
"* return output_movqi (insn, operands, NULL);"
[(set_attr "length" "1,1,5,5,1,1,4")
(set_attr "cc" "none,none,clobber,clobber,none,none,clobber")])
@@ -274,17 +283,17 @@
;;============================================================================
;; move word (16 bit)
-(define_expand "movhi"
- [(set (match_operand:HI 0 "nonimmediate_operand" "")
- (match_operand:HI 1 "general_operand" ""))]
+(define_expand "mov"
+ [(set (match_operand:ALLH 0 "nonimmediate_operand" "")
+ (match_operand:ALLH 1 "general_operand" ""))]
""
"
{
/* One of the ops has to be in a register. */
- if (!register_operand(operand0, HImode)
- && !(register_operand(operand1, HImode) || const0_rtx == operands[1]))
+ if (!register_operand(operand0, <MODE>mode)
+ && !(register_operand(operand1, <MODE>mode) || const0_rtx == operands[1]))
{
- operands[1] = copy_to_mode_reg(HImode, operand1);
+ operands[1] = copy_to_mode_reg(<MODE>mode, operand1);
}
}")
@@ -339,20 +348,20 @@
[(set_attr "length" "4")
(set_attr "cc" "none")])
-(define_insn "*movhi"
- [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,d,*r,q,r")
- (match_operand:HI 1 "general_operand" "rL,m,rL,i,i,r,q"))]
- "(register_operand (operands[0],HImode)
- || register_operand (operands[1],HImode) || const0_rtx == operands[1])"
+(define_insn "*mov"
+ [(set (match_operand:ALLH 0 "nonimmediate_operand" "=r,r,m,d,*r,q,r")
+ (match_operand:ALLH 1 "general_operand" "r,m,rL,i,i,r,q"))]
+ "(register_operand (operands[0],mode)
+ || register_operand (operands[1],mode) || const0_rtx == operands[1])"
"* return output_movhi (insn, operands, NULL);"
[(set_attr "length" "2,6,7,2,6,5,2")
(set_attr "cc" "none,clobber,clobber,none,clobber,none,none")])
(define_peephole2 ; movw
- [(set (match_operand:QI 0 "even_register_operand" "")
- (match_operand:QI 1 "even_register_operand" ""))
- (set (match_operand:QI 2 "odd_register_operand" "")
- (match_operand:QI 3 "odd_register_operand" ""))]
+ [(set (match_operand:ALLQ 0 "even_register_operand" "")
+ (match_operand:ALLQ 1 "even_register_operand" ""))
+ (set (match_operand:ALLQ 2 "odd_register_operand" "")
+ (match_operand:ALLQ 3 "odd_register_operand" ""))]
"(AVR_HAVE_MOVW
&& REGNO (operands[0]) == REGNO (operands[2]) - 1
&& REGNO (operands[1]) == REGNO (operands[3]) - 1)"
@@ -363,10 +372,10 @@
})
(define_peephole2 ; movw_r
- [(set (match_operand:QI 0 "odd_register_operand" "")
- (match_operand:QI 1 "odd_register_operand" ""))
- (set (match_operand:QI 2 "even_register_operand" "")
- (match_operand:QI 3 "even_register_operand" ""))]
+ [(set (match_operand:ALLQ 0 "odd_register_operand" "")
+ (match_operand:ALLQ 1 "odd_register_operand" ""))
+ (set (match_operand:ALLQ 2 "even_register_operand" "")
+ (match_operand:ALLQ 3 "even_register_operand" ""))]
"(AVR_HAVE_MOVW
&& REGNO (operands[2]) == REGNO (operands[0]) - 1
&& REGNO (operands[3]) == REGNO (operands[1]) - 1)"
@@ -379,26 +388,24 @@
;;==========================================================================
;; move double word (32 bit)
-(define_expand "movsi"
- [(set (match_operand:SI 0 "nonimmediate_operand" "")
- (match_operand:SI 1 "general_operand" ""))]
+(define_expand "mov"
+ [(set (match_operand:ALLS 0 "nonimmediate_operand" "")
+ (match_operand:ALLS 1 "general_operand" ""))]
""
"
{
/* One of the ops has to be in a register. */
- if (!register_operand (operand0, SImode)
- && !(register_operand (operand1, SImode) || const0_rtx == operand1))
+ if (!register_operand (operand0, <MODE>mode)
+ && !(register_operand (operand1, <MODE>mode) || const0_rtx == operand1))
{
- operands[1] = copy_to_mode_reg (SImode, operand1);
+ operands[1] = copy_to_mode_reg (<MODE>mode, operand1);
}
}")
-
-
(define_peephole2 ; movsi_lreg_const
[(match_scratch:QI 2 "d")
- (set (match_operand:SI 0 "l_register_operand" "")
- (match_operand:SI 1 "immediate_operand" ""))
+ (set (match_operand:ALLS 0 "l_register_operand" "")
+ (match_operand:ALLS 1 "immediate_operand" ""))
(match_dup 2)]
"(operands[1] != const0_rtx
&& operands[1] != constm1_rtx)"
@@ -408,8 +415,8 @@
;; '*' because it is not used in rtl generation.
(define_insn "*reload_insi"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (match_operand:SI 1 "immediate_operand" "i"))
+ [(set (match_operand:ALLS 0 "register_operand" "=r")
+ (match_operand:ALLS 1 "immediate_operand" "i"))
(clobber (match_operand:QI 2 "register_operand" "=&d"))]
"reload_completed"
"* return output_reload_insisf (insn, operands, NULL);"
@@ -417,11 +424,11 @@
(set_attr "cc" "none")])
-(define_insn "*movsi"
- [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,Qm,!d,r")
- (match_operand:SI 1 "general_operand" "r,L,Qm,rL,i,i"))]
- "(register_operand (operands[0],SImode)
- || register_operand (operands[1],SImode) || const0_rtx == operands[1])"
+(define_insn "*mov"
+ [(set (match_operand:ALLS 0 "nonimmediate_operand" "=r,r,r,Qm,!d,r")
+ (match_operand:ALLS 1 "general_operand" "r,L,Qm,rL,i,i"))]
+ "(register_operand (operands[0],mode)
+ || register_operand (operands[1],mode) || const0_rtx == operands[1])"
"* return output_movsisf (insn, operands, NULL);"
[(set_attr "length" "4,4,8,9,4,10")
(set_attr "cc" "none,set_zn,clobber,clobber,none,clobber")])
@@ -958,23 +965,54 @@
[(set_attr "type" "xcall")
(set_attr "cc" "clobber")])
-(define_insn "mulqihi3"
+;; Define code iterators
+(define_code_iterator any_extend [sign_extend zero_extend])
+(define_code_attr s [(sign_extend "s") (zero_extend "")])
+(define_code_attr u [(sign_extend "") (zero_extend "u")])
+(define_code_attr su [(sign_extend "s") (zero_extend "u")])
+
+(define_insn "mulqi3_highpart"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (truncate:QI
+ (lshiftrt:HI
+ (mult:HI (any_extend:HI (match_operand:QI 1 "register_operand" "d"))
+ (any_extend:HI (match_operand:QI 2 "register_operand" "d")))
+ (const_int 8))))]
+ "AVR_HAVE_MUL && !optimize_size"
+ "mul %1,%2
+ mov %0,r1
+ clr r1"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+(define_insn "mulqihi3"
[(set (match_operand:HI 0 "register_operand" "=r")
- (mult:HI (sign_extend:HI (match_operand:QI 1 "register_operand" "d"))
- (sign_extend:HI (match_operand:QI 2 "register_operand" "d"))))]
+ (mult:HI (any_extend:HI (match_operand:QI 1 "register_operand" "d"))
+ (any_extend:HI (match_operand:QI 2 "register_operand" "d"))))]
"AVR_HAVE_MUL"
- "muls %1,%2
+ "mul %1,%2
movw %0,r0
clr r1"
[(set_attr "length" "3")
(set_attr "cc" "clobber")])
-(define_insn "umulqihi3"
+(define_insn "*sumulqihi3"
[(set (match_operand:HI 0 "register_operand" "=r")
- (mult:HI (zero_extend:HI (match_operand:QI 1 "register_operand" "r"))
- (zero_extend:HI (match_operand:QI 2 "register_operand" "r"))))]
+ (mult:HI (sign_extend:HI (match_operand:QI 1 "register_operand" "a"))
+ (zero_extend:HI (match_operand:QI 2 "register_operand" "a"))))]
"AVR_HAVE_MUL"
- "mul %1,%2
+ "mulsu %1,%2
+ movw %0,r0
+ clr r1"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*usmulqihi3"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (mult:HI (zero_extend:HI (match_operand:QI 1 "register_operand" "a"))
+ (sign_extend:HI (match_operand:QI 2 "register_operand" "a"))))]
+ "AVR_HAVE_MUL"
+ "mulsu %2,%1
movw %0,r0
clr r1"
[(set_attr "length" "3")
@@ -1028,6 +1066,50 @@
[(set_attr "type" "xcall")
(set_attr "cc" "clobber")])
+(define_expand "mulhisi3"
+ [(set (reg:HI 18) (match_operand:SI 1 "register_operand" ""))
+ (set (reg:HI 20) (match_operand:SI 2 "register_operand" ""))
+ (set (reg:SI 22)
+ (mult:SI (any_extend:SI (reg:HI 18))
+ (any_extend:SI (reg:HI 20))))
+ (set (match_operand:SI 0 "register_operand" "") (reg:SI 22))]
+ "!optimize_size"
+ "")
+
+(define_insn "*mulhisi3_call"
+ [(set (reg:SI 22)
+ (mult:SI (any_extend:SI (reg:HI 18))
+ (any_extend:SI (reg:HI 20))))]
+ "!optimize_size"
+ "%~call __mulhisi3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_expand "mulhi3_highpart"
+ [(set (reg:HI 18) (match_operand:HI 1 "register_operand" ""))
+ (set (reg:HI 20) (match_operand:HI 2 "register_operand" ""))
+ (set (reg:HI 24) (truncate:HI (lshiftrt:SI
+ (mult:SI (any_extend:SI (reg:HI 18))
+ (any_extend:SI (reg:HI 20)))
+ (const_int 16))))
+ (set (match_operand:HI 0 "register_operand" "") (reg:HI 24))]
+ "AVR_HAVE_MUL"
+ "")
+
+(define_insn_and_split "*mulhi3_highpart_call"
+ [(set (reg:HI 24) (truncate:HI (lshiftrt:SI
+ (mult:SI (any_extend:SI (reg:HI 18))
+ (any_extend:SI (reg:HI 20)))
+ (const_int 16))))]
+ "AVR_HAVE_MUL"
+ ""
+ ""
+ [(set (reg:SI 22)
+ (mult:SI (any_extend:SI (reg:HI 18))
+ (any_extend:SI (reg:HI 20))))
+ (clobber (reg:HI 22))]
+ "")
+
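In C terms the highpart patterns compute the upper half of the widened product; a sketch of the signed flavor (illustrative only):

    #include <stdint.h>

    int16_t smulhi3_highpart_model (int16_t a, int16_t b)
    {
      return (int16_t) (((int32_t) a * (int32_t) b) >> 16);
    }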
;; Operand 2 (reg:SI 18) not clobbered on the enhanced core.
;; All call-used registers clobbered otherwise - normal library call.
(define_expand "mulsi3"
@@ -1572,9 +1654,9 @@
;;<< << << << << << << << << << << << << << << << << << << << << << << << << <<
;; arithmetic shift left
-(define_expand "ashlqi3"
- [(set (match_operand:QI 0 "register_operand" "")
- (ashift:QI (match_operand:QI 1 "register_operand" "")
+(define_expand "ashl3"
+ [(set (match_operand:ALLQ 0 "register_operand" "")
+ (ashift:ALLQ (match_operand:ALLQ 1 "register_operand" "")
(match_operand:QI 2 "general_operand" "")))]
""
"")
@@ -1608,27 +1690,27 @@
(set (match_dup 0) (and:QI (match_dup 0) (const_int -64)))]
"")
-(define_insn "*ashlqi3"
- [(set (match_operand:QI 0 "register_operand" "=r,r,r,r,!d,r,r")
- (ashift:QI (match_operand:QI 1 "register_operand" "0,0,0,0,0,0,0")
+(define_insn "*ashl3"
+ [(set (match_operand:ALLQ 0 "register_operand" "=r,r,r,r,!d,r,r")
+ (ashift:ALLQ (match_operand:ALLQ 1 "register_operand" "0,0,0,0,0,0,0")
(match_operand:QI 2 "general_operand" "r,L,P,K,n,n,Qm")))]
""
"* return ashlqi3_out (insn, operands, NULL);"
[(set_attr "length" "5,0,1,2,4,6,9")
(set_attr "cc" "clobber,none,set_czn,set_czn,set_czn,set_czn,clobber")])
-(define_insn "ashlhi3"
- [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r,r,r")
- (ashift:HI (match_operand:HI 1 "register_operand" "0,0,0,r,0,0,0")
+(define_insn "ashl3"
+ [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r,r,r")
+ (ashift:ALLH (match_operand:ALLH 1 "register_operand" "0,0,0,r,0,0,0")
(match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
""
"* return ashlhi3_out (insn, operands, NULL);"
[(set_attr "length" "6,0,2,2,4,10,10")
(set_attr "cc" "clobber,none,set_n,clobber,set_n,clobber,clobber")])
-(define_insn "ashlsi3"
- [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r")
- (ashift:SI (match_operand:SI 1 "register_operand" "0,0,0,r,0,0,0")
+(define_insn "ashl3"
+ [(set (match_operand:ALLS 0 "register_operand" "=r,r,r,r,r,r,r")
+ (ashift:ALLS (match_operand:ALLS 1 "register_operand" "0,0,0,r,0,0,0")
(match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
""
"* return ashlsi3_out (insn, operands, NULL);"
@@ -1674,17 +1756,17 @@
(define_peephole2
[(match_scratch:QI 3 "d")
- (set (match_operand:HI 0 "register_operand" "")
- (ashift:HI (match_operand:HI 1 "register_operand" "")
+ (set (match_operand:ALLH 0 "register_operand" "")
+ (ashift:ALLH (match_operand:ALLH 1 "register_operand" "")
(match_operand:QI 2 "const_int_operand" "")))]
""
- [(parallel [(set (match_dup 0) (ashift:HI (match_dup 1) (match_dup 2)))
+ [(parallel [(set (match_dup 0) (ashift:ALLH (match_dup 1) (match_dup 2)))
(clobber (match_dup 3))])]
"")
-(define_insn "*ashlhi3_const"
- [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r")
- (ashift:HI (match_operand:HI 1 "register_operand" "0,0,r,0,0")
+(define_insn "*ashl3_const"
+ [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r")
+ (ashift:ALLH (match_operand:ALLH 1 "register_operand" "0,0,r,0,0")
(match_operand:QI 2 "const_int_operand" "L,P,O,K,n")))
(clobber (match_scratch:QI 3 "=X,X,X,X,&d"))]
"reload_completed"
@@ -1694,17 +1776,17 @@
(define_peephole2
[(match_scratch:QI 3 "d")
- (set (match_operand:SI 0 "register_operand" "")
- (ashift:SI (match_operand:SI 1 "register_operand" "")
+ (set (match_operand:ALLS 0 "register_operand" "")
+ (ashift:ALLS (match_operand:ALLS 1 "register_operand" "")
(match_operand:QI 2 "const_int_operand" "")))]
""
- [(parallel [(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))
+ [(parallel [(set (match_dup 0) (ashift:ALLS (match_dup 1) (match_dup 2)))
(clobber (match_dup 3))])]
"")
-(define_insn "*ashlsi3_const"
- [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
- (ashift:SI (match_operand:SI 1 "register_operand" "0,0,r,0")
+(define_insn "*ashl3_const"
+ [(set (match_operand:ALLS 0 "register_operand" "=r,r,r,r")
+ (ashift:ALLS (match_operand:ALLS 1 "register_operand" "0,0,r,0")
(match_operand:QI 2 "const_int_operand" "L,P,O,n")))
(clobber (match_scratch:QI 3 "=X,X,X,&d"))]
"reload_completed"
@@ -1715,27 +1797,27 @@
;; >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
;; arithmetic shift right
-(define_insn "ashrqi3"
- [(set (match_operand:QI 0 "register_operand" "=r,r,r,r,r,r")
- (ashiftrt:QI (match_operand:QI 1 "register_operand" "0,0,0,0,0,0")
+(define_insn "ashr3"
+ [(set (match_operand:ALLQ 0 "register_operand" "=r,r,r,r,r,r")
+ (ashiftrt:ALLQ (match_operand:ALLQ 1 "register_operand" "0,0,0,0,0,0")
(match_operand:QI 2 "general_operand" "r,L,P,K,n,Qm")))]
""
"* return ashrqi3_out (insn, operands, NULL);"
[(set_attr "length" "5,0,1,2,5,9")
(set_attr "cc" "clobber,none,clobber,clobber,clobber,clobber")])
-(define_insn "ashrhi3"
- [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r,r,r")
- (ashiftrt:HI (match_operand:HI 1 "register_operand" "0,0,0,r,0,0,0")
+(define_insn "ashr3"
+ [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r,r,r")
+ (ashiftrt:ALLH (match_operand:ALLH 1 "register_operand" "0,0,0,r,0,0,0")
(match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
""
"* return ashrhi3_out (insn, operands, NULL);"
[(set_attr "length" "6,0,2,4,4,10,10")
(set_attr "cc" "clobber,none,clobber,set_n,clobber,clobber,clobber")])
-(define_insn "ashrsi3"
- [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r")
- (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0,0,r,0,0,0")
+(define_insn "ashr3"
+ [(set (match_operand:ALLS 0 "register_operand" "=r,r,r,r,r,r,r")
+ (ashiftrt:ALLS (match_operand:ALLS 1 "register_operand" "0,0,0,r,0,0,0")
(match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
""
"* return ashrsi3_out (insn, operands, NULL);"
@@ -1746,17 +1828,17 @@
(define_peephole2
[(match_scratch:QI 3 "d")
- (set (match_operand:HI 0 "register_operand" "")
- (ashiftrt:HI (match_operand:HI 1 "register_operand" "")
+ (set (match_operand:ALLH 0 "register_operand" "")
+ (ashiftrt:ALLH (match_operand:ALLH 1 "register_operand" "")
(match_operand:QI 2 "const_int_operand" "")))]
""
- [(parallel [(set (match_dup 0) (ashiftrt:HI (match_dup 1) (match_dup 2)))
+ [(parallel [(set (match_dup 0) (ashiftrt:ALLH (match_dup 1) (match_dup 2)))
(clobber (match_dup 3))])]
"")
(define_insn "*ashrhi3_const"
- [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r")
- (ashiftrt:HI (match_operand:HI 1 "register_operand" "0,0,r,0,0")
+ [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r")
+ (ashiftrt:ALLH (match_operand:ALLH 1 "register_operand" "0,0,r,0,0")
(match_operand:QI 2 "const_int_operand" "L,P,O,K,n")))
(clobber (match_scratch:QI 3 "=X,X,X,X,&d"))]
"reload_completed"
@@ -1766,17 +1848,17 @@
(define_peephole2
[(match_scratch:QI 3 "d")
- (set (match_operand:SI 0 "register_operand" "")
- (ashiftrt:SI (match_operand:SI 1 "register_operand" "")
+ (set (match_operand:ALLS 0 "register_operand" "")
+ (ashiftrt:ALLS (match_operand:ALLS 1 "register_operand" "")
(match_operand:QI 2 "const_int_operand" "")))]
""
- [(parallel [(set (match_dup 0) (ashiftrt:SI (match_dup 1) (match_dup 2)))
+ [(parallel [(set (match_dup 0) (ashiftrt:ALLS (match_dup 1) (match_dup 2)))
(clobber (match_dup 3))])]
"")
(define_insn "*ashrsi3_const"
- [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
- (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r,0")
+ [(set (match_operand:ALLS 0 "register_operand" "=r,r,r,r")
+ (ashiftrt:ALLS (match_operand:ALLS 1 "register_operand" "0,0,r,0")
(match_operand:QI 2 "const_int_operand" "L,P,O,n")))
(clobber (match_scratch:QI 3 "=X,X,X,&d"))]
"reload_completed"
@@ -1787,54 +1869,54 @@
;; >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
;; logical shift right
-(define_expand "lshrqi3"
- [(set (match_operand:QI 0 "register_operand" "")
- (lshiftrt:QI (match_operand:QI 1 "register_operand" "")
- (match_operand:QI 2 "general_operand" "")))]
+(define_expand "lshr3"
+ [(set (match_operand:ALLQ 0 "register_operand" "")
+ (lshiftrt:ALLQ (match_operand:ALLQ 1 "register_operand" "")
+ (match_operand:QI 2 "general_operand" "")))]
""
"")
(define_split ; lshrqi3_const4
- [(set (match_operand:QI 0 "d_register_operand" "")
- (lshiftrt:QI (match_dup 0)
+ [(set (match_operand:ALLQ 0 "d_register_operand" "")
+ (lshiftrt:ALLQ (match_dup 0)
(const_int 4)))]
""
- [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
- (set (match_dup 0) (and:QI (match_dup 0) (const_int 15)))]
+ [(set (match_dup 0) (rotate:ALLQ (match_dup 0) (const_int 4)))
+ (set (match_dup 0) (and:ALLQ (match_dup 0) (const_int 15)))]
"")
(define_split ; lshrqi3_const5
- [(set (match_operand:QI 0 "d_register_operand" "")
- (lshiftrt:QI (match_dup 0)
+ [(set (match_operand:ALLQ 0 "d_register_operand" "")
+ (lshiftrt:ALLQ (match_dup 0)
(const_int 5)))]
""
- [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
- (set (match_dup 0) (lshiftrt:QI (match_dup 0) (const_int 1)))
- (set (match_dup 0) (and:QI (match_dup 0) (const_int 7)))]
+ [(set (match_dup 0) (rotate:ALLQ (match_dup 0) (const_int 4)))
+ (set (match_dup 0) (lshiftrt:ALLQ (match_dup 0) (const_int 1)))
+ (set (match_dup 0) (and:ALLQ (match_dup 0) (const_int 7)))]
"")
(define_split ; lshrqi3_const6
- [(set (match_operand:QI 0 "d_register_operand" "")
- (lshiftrt:QI (match_dup 0)
+ [(set (match_operand:ALLQ 0 "d_register_operand" "")
+ (lshiftrt:ALLQ (match_dup 0)
(const_int 6)))]
""
- [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
- (set (match_dup 0) (lshiftrt:QI (match_dup 0) (const_int 2)))
- (set (match_dup 0) (and:QI (match_dup 0) (const_int 3)))]
+ [(set (match_dup 0) (rotate:ALLQ (match_dup 0) (const_int 4)))
+ (set (match_dup 0) (lshiftrt:ALLQ (match_dup 0) (const_int 2)))
+ (set (match_dup 0) (and:ALLQ (match_dup 0) (const_int 3)))]
"")
(define_insn "*lshrqi3"
- [(set (match_operand:QI 0 "register_operand" "=r,r,r,r,!d,r,r")
- (lshiftrt:QI (match_operand:QI 1 "register_operand" "0,0,0,0,0,0,0")
- (match_operand:QI 2 "general_operand" "r,L,P,K,n,n,Qm")))]
+ [(set (match_operand:ALLQ 0 "register_operand" "=r,r,r,r,!d,r,r")
+ (lshiftrt:ALLQ (match_operand:ALLQ 1 "register_operand" "0,0,0,0,0,0,0")
+ (match_operand:ALLQ 2 "general_operand" "r,L,P,K,n,n,Qm")))]
""
"* return lshrqi3_out (insn, operands, NULL);"
[(set_attr "length" "5,0,1,2,4,6,9")
(set_attr "cc" "clobber,none,set_czn,set_czn,set_czn,set_czn,clobber")])
-(define_insn "lshrhi3"
- [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r,r,r")
- (lshiftrt:HI (match_operand:HI 1 "register_operand" "0,0,0,r,0,0,0")
+(define_insn "lshr3"
+ [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r,r,r")
+ (lshiftrt:ALLH (match_operand:ALLH 1 "register_operand" "0,0,0,r,0,0,0")
(match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
""
"* return lshrhi3_out (insn, operands, NULL);"
@@ -1889,17 +1971,17 @@
(define_peephole2
[(match_scratch:QI 3 "d")
- (set (match_operand:HI 0 "register_operand" "")
- (lshiftrt:HI (match_operand:HI 1 "register_operand" "")
+ (set (match_operand:ALLH 0 "register_operand" "")
+ (lshiftrt:ALLH (match_operand:ALLH 1 "register_operand" "")
(match_operand:QI 2 "const_int_operand" "")))]
""
- [(parallel [(set (match_dup 0) (lshiftrt:HI (match_dup 1) (match_dup 2)))
+ [(parallel [(set (match_dup 0) (lshiftrt:ALLH (match_dup 1) (match_dup 2)))
(clobber (match_dup 3))])]
"")
-(define_insn "*lshrhi3_const"
- [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r")
- (lshiftrt:HI (match_operand:HI 1 "register_operand" "0,0,r,0,0")
+(define_insn "*lshr3_const"
+ [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r")
+ (lshiftrt:ALLH (match_operand:ALLH 1 "register_operand" "0,0,r,0,0")
(match_operand:QI 2 "const_int_operand" "L,P,O,K,n")))
(clobber (match_scratch:QI 3 "=X,X,X,X,&d"))]
"reload_completed"
@@ -1917,9 +1999,9 @@
(clobber (match_dup 3))])]
"")
-(define_insn "*lshrsi3_const"
- [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
- (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r,0")
+(define_insn "*lshr3_const"
+ [(set (match_operand:ALLS 0 "register_operand" "=r,r,r,r")
+ (lshiftrt:ALLS (match_operand:ALLS 1 "register_operand" "0,0,r,0")
(match_operand:QI 2 "const_int_operand" "L,P,O,n")))
(clobber (match_scratch:QI 3 "=X,X,X,&d"))]
"reload_completed"
@@ -2163,27 +2245,27 @@
;; compare
; Optimize negated tests into reverse compare if overflow is undefined.
-(define_insn "*negated_tstqi"
+(define_insn "*negated_tst"
[(set (cc0)
- (compare (neg:QI (match_operand:QI 0 "register_operand" "r"))
+ (compare (neg:ALLQ (match_operand:ALLQ 0 "register_operand" "r"))
(const_int 0)))]
"(!flag_wrapv && !flag_trapv && flag_strict_overflow)"
"cp __zero_reg__,%0"
[(set_attr "cc" "compare")
(set_attr "length" "1")])
-(define_insn "*reversed_tstqi"
+(define_insn "*reversed_tst"
[(set (cc0)
(compare (const_int 0)
- (match_operand:QI 0 "register_operand" "r")))]
+ (match_operand:ALLQ 0 "register_operand" "r")))]
""
"cp __zero_reg__,%0"
[(set_attr "cc" "compare")
(set_attr "length" "2")])
-(define_insn "*negated_tsthi"
+(define_insn "*negated_tst"
[(set (cc0)
- (compare (neg:HI (match_operand:HI 0 "register_operand" "r"))
+ (compare (neg:ALLH (match_operand:ALLH 0 "register_operand" "r"))
(const_int 0)))]
"(!flag_wrapv && !flag_trapv && flag_strict_overflow)"
"cp __zero_reg__,%A0
@@ -2193,10 +2275,10 @@
;; Leave here the clobber used by the cmphi pattern for simplicity, even
;; though it is unused, because this pattern is synthesized by avr_reorg.
-(define_insn "*reversed_tsthi"
+(define_insn "*reversed_tst"
[(set (cc0)
(compare (const_int 0)
- (match_operand:HI 0 "register_operand" "r")))
+ (match_operand:ALLH 0 "register_operand" "r")))
(clobber (match_scratch:QI 1 "=X"))]
""
"cp __zero_reg__,%A0
@@ -2204,9 +2286,9 @@
[(set_attr "cc" "compare")
(set_attr "length" "2")])
-(define_insn "*negated_tstsi"
+(define_insn "*negated_tst"
[(set (cc0)
- (compare (neg:SI (match_operand:SI 0 "register_operand" "r"))
+ (compare (neg:ALLS (match_operand:ALLS 0 "register_operand" "r"))
(const_int 0)))]
"(!flag_wrapv && !flag_trapv && flag_strict_overflow)"
"cp __zero_reg__,%A0
@@ -2216,10 +2298,10 @@
[(set_attr "cc" "compare")
(set_attr "length" "4")])
-(define_insn "*reversed_tstsi"
+(define_insn "*reversed_tst"
[(set (cc0)
(compare (const_int 0)
- (match_operand:SI 0 "register_operand" "r")))
+ (match_operand:ALLS 0 "register_operand" "r")))
(clobber (match_scratch:QI 1 "=X"))]
""
"cp __zero_reg__,%A0
@@ -2230,10 +2312,10 @@
(set_attr "length" "4")])
-(define_insn "*cmpqi"
+(define_insn "*cmp"
[(set (cc0)
- (compare (match_operand:QI 0 "register_operand" "r,r,d")
- (match_operand:QI 1 "nonmemory_operand" "L,r,i")))]
+ (compare (match_operand:ALLQ 0 "register_operand" "r,r,d")
+ (match_operand:ALLQ 1 "nonmemory_operand" "L,r,i")))]
""
"@
tst %0
@@ -2252,10 +2334,10 @@
[(set_attr "cc" "compare")
(set_attr "length" "1")])
-(define_insn "*cmphi"
+(define_insn "*cmp"
[(set (cc0)
- (compare (match_operand:HI 0 "register_operand" "!w,r,r,d,d,r,r")
- (match_operand:HI 1 "nonmemory_operand" "L,L,r,M,i,M,i")))
+ (compare (match_operand:ALLH 0 "register_operand" "!w,r,r,d,d,r,r")
+ (match_operand:ALLH 1 "nonmemory_operand" "L,L,r,M,i,M,i")))
(clobber (match_scratch:QI 2 "=X,X,X,X,&d,&d,&d"))]
""
"*{
@@ -2300,10 +2382,10 @@
(set_attr "length" "1,2,2,2,3,3,4")])
-(define_insn "*cmpsi"
+(define_insn "*cmp"
[(set (cc0)
- (compare (match_operand:SI 0 "register_operand" "r,r,d,d,r,r")
- (match_operand:SI 1 "nonmemory_operand" "L,r,M,i,M,i")))
+ (compare (match_operand:ALLS 0 "register_operand" "r,r,d,d,r,r")
+ (match_operand:ALLS 1 "nonmemory_operand" "L,r,M,i,M,i")))
(clobber (match_scratch:QI 2 "=X,X,X,&d,&d,&d"))]
""
"*{
diff -rupN gcc/config/avr/avr-modes.def gcc/config/avr/avr-modes.def
--- gcc/config/avr/avr-modes.def 1969-12-31 18:00:00.000000000 -0600
+++ gcc/config/avr/avr-modes.def 2010-09-21 14:31:30.000000000 -0500
@@ -0,0 +1,34 @@
+/* Definitions of target machine for GCC for AVR.
+ Copyright (C) 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* On 8 bit machines it requires fewer instructions for fixed point
+ routines if the decimal place is on a byte boundary which is not
+ the default for signed accum types. */
+
+ADJUST_IBIT (HA, 7);
+ADJUST_FBIT (HA, 8);
+
+ADJUST_IBIT (SA, 15);
+ADJUST_FBIT (SA, 16);
+
+ADJUST_IBIT (DA, 31);
+ADJUST_FBIT (DA, 32);
+
+ADJUST_IBIT (TA, 63);
+ADJUST_FBIT (TA, 64);
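Worked example of the adjustment for HA (the default IBIT/FBIT values are GCC's generic ones and are stated here as an assumption):

    /* default signed HA:  s iiiiiiii fffffff    (IBIT = 8, FBIT = 7)
       adjusted above:     s iiiiiii ffffffff    (IBIT = 7, FBIT = 8)
       With FBIT = 8 the fraction occupies exactly the low byte, so
       conversions reduce to byte moves: 1.5hk = 1.5 * 2^8 = 0x0180,
       stored little-endian as { 0x80, 0x01 }.  */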
diff -rupN gcc/config/avr/avr-protos.h gcc/config/avr/avr-protos.h
--- gcc/config/avr/avr-protos.h 2010-01-08 17:01:45.000000000 -0600
+++ gcc/config/avr/avr-protos.h 2010-09-21 14:31:30.000000000 -0500
@@ -84,6 +84,8 @@ extern const char *lshrhi3_out (rtx insn
extern const char *lshrsi3_out (rtx insn, rtx operands[], int *len);
extern bool avr_rotate_bytes (rtx operands[]);
+extern const char *fract_out (rtx insn, rtx operands[], int intsigned, int *l);
+
extern void expand_prologue (void);
extern void expand_epilogue (void);
extern int avr_epilogue_uses (int regno);
diff -rupN gcc/config/avr/libgcc-fixed.S gcc/config/avr/libgcc-fixed.S
--- gcc/config/avr/libgcc-fixed.S 1969-12-31 18:00:00.000000000 -0600
+++ gcc/config/avr/libgcc-fixed.S 2010-09-21 14:31:30.000000000 -0500
@@ -0,0 +1,1123 @@
+/* -*- Mode: Asm -*- */
+/* Copyright (C) 2009
+ Free Software Foundation, Inc.
+ Contributed by Sean D'Epagnier
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* Fixed point library routines for avr. */
+
+#define __zero_reg__ r1
+#define __tmp_reg__ r0
+#define __SREG__ 0x3f
+#define __SP_H__ 0x3e
+#define __SP_L__ 0x3d
+#define __RAMPZ__ 0x3B
+
+/* Conversions to float. */
+#if defined (L_fractqqsf)
+ .global __fractqqsf
+ .func __fractqqsf
+__fractqqsf:
+ clr r25
+ sbrc r24, 7 ; if negative
+ ser r25 ; sign extend
+ mov r23, r24 ; move in place
+ mov r24, r25 ; sign extend lower byte
+ lsl r23
+ clr r22
+ rjmp __fractsasf ; call larger conversion
+.endfunc
+#endif /* defined (L_fractqqsf) */
+
+#if defined (L_fractuqqsf)
+ .global __fractuqqsf
+ .func __fractuqqsf
+__fractuqqsf:
+ clr r22
+ mov r23, r24
+ clr r24
+ clr r25
+ rjmp __fractsasf ; call larger conversion
+.endfunc
+#endif /* defined (L_fractuqqsf) */
+
+#if defined (L_fracthqsf)
+ .global __fracthqsf
+ .func __fracthqsf
+__fracthqsf:
+ mov_l r22, r24 ; put fractional part in place
+ mov_h r23, r25
+ clr r25
+ sbrc r23, 7 ; if negative
+ ser r25 ; sign extend
+ mov r24, r25 ; sign extend lower byte
+ lsl r22
+ rol r23
+ rjmp __fractsasf ; call larger conversion
+.endfunc
+#endif /* defined (L_fracthqsf) */
+
+#if defined (L_fractuhqsf)
+ .global __fractuhqsf
+ .func __fractuhqsf
+__fractuhqsf:
+ mov_l r22, r24 ; put fractional part in place
+ mov_h r23, r25
+ clr r24
+ clr r25
+ rjmp __fractsasf ; call larger conversion
+.endfunc
+#endif /* defined (L_fractuhqsf) */
+
+#if defined (L_fracthasf)
+ .global __fracthasf
+ .func __fracthasf
+__fracthasf:
+ clr r22
+ mov r23, r24 ; move into place
+ mov r24, r25
+ clr r25
+ sbrc r24, 7 ; if negative
+ ser r25 ; sign extend
+ rjmp __fractsasf ; call larger conversion
+.endfunc
+#endif /* defined (L_fracthasf) */
+
+#if defined (L_fractuhasf)
+ .global __fractuhasf
+ .func __fractuhasf
+__fractuhasf:
+ clr r22
+ mov r23, r24 ; move into place
+ rjmp __fractsasf ; call larger conversion
+.endfunc
+#endif /* defined (L_fractuhasf) */
+
+#if defined (L_fractsasf)
+ .global __fractsasf
+ .func __fractsasf
+__fractsasf:
+ rcall __floatsisf
+ tst r25
+ breq __fractsasf_exit ; skip if zero
+ subi r25, 0x08 ; adjust exponent
+__fractsasf_exit:
+ ret
+.endfunc
+#endif /* defined (L_fractsasf) */
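The exponent adjustment above, spelled out (my annotation, assuming the float sits in r22..r25 with the sign and upper seven exponent bits in r25): subi r25,0x08 lowers the biased exponent by 16, i.e. multiplies by 2^-16, matching SA's 16 fractional bits; the tst/breq skips the adjustment for 0.0, whose exponent field must stay zero. An equivalent C sketch:

    #include <stdint.h>

    float fractsasf_model (int32_t sa_bits)  /* raw SA register image */
    {
      return (float) sa_bits / 65536.0f;     /* __floatsisf, then scale by 2^-16 */
    }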
+
+#if defined (L_fractusasf)
+ .global __fractusasf
+ .func __fractusasf
+__fractusasf:
+ rcall __floatunsisf
+ tst r25
+ breq __fractusasf_exit ; skip if zero
+ subi r25, 0x08 ; adjust exponent
+__fractusasf_exit:
+ ret
+.endfunc
+#endif /* defined (L_fractusasf) */
+
+#if defined (L_fractsfqq) /* Conversions from float. */
+ .global __fractsfqq
+ .func __fractsfqq
+__fractsfqq:
+ subi r25, -11 ; adjust exponent
+ subi r24, 128
+ rjmp __fixsfsi
+.endfunc
+#endif /* defined (L_fractsfqq) */
+
+#if defined (L_fractsfuqq)
+ .global __fractsfuqq
+ .func __fractsfuqq
+__fractsfuqq:
+ subi r25, -12 ; adjust exponent
+ rjmp __fixsfsi
+.endfunc
+#endif /* defined (L_fractsfuqq) */
+
+#if defined (L_fractsfhq)
+ .global __fractsfhq
+ .func __fractsfhq
+__fractsfhq:
+ subi r25, -15 ; adjust exponent
+ subi r24, 128
+ rjmp __fixsfsi
+.endfunc
+#endif /* defined (L_fractsfhq) */
+
+#if defined (L_fractsfuhq)
+ .global __fractsfuhq
+ .func __fractsfuhq
+__fractsfuhq:
+ subi r25, -16 ; adjust exponent
+ rjmp __fixsfsi
+.endfunc
+#endif /* defined (L_fractsfuhq) */
+
+#if defined (L_fractsfha)
+ .global __fractsfha
+ .func __fractsfha
+__fractsfha:
+.endfunc
+ .global __fractsfuha
+ .func __fractsfuha
+__fractsfuha:
+ subi r25, -12 ; adjust exponent
+ rjmp __fixsfsi
+.endfunc
+#endif /* defined (L_fractsfha) */
+
+#if defined (L_fractsfsa)
+ .global __fractsfsa
+ .func __fractsfsa
+__fractsfsa:
+.endfunc
+ .global __fractsfusa
+ .func __fractsfusa
+__fractsfusa:
+ subi r25, -8 ; adjust exponent
+ rjmp __fixsfsi
+.endfunc
+#endif /* defined (L_fractsfsa) */
+
+/* For multiplication the functions here are called directly from
+ avr-fixed.md patterns, instead of using the standard libcall mechanisms.
+ This can make better code because GCC knows exactly which
+ of the call-used registers (not all of them) are clobbered. */
+
+/* mulqq and muluqq open coded on the enhanced core */
+#if !defined (__AVR_HAVE_MUL__)
+/*******************************************************
+ Fractional Multiplication 8 x 8
+*******************************************************/
+#define r_arg2 r22 /* multiplicand */
+#define r_arg1 r24 /* multiplier */
+#define r_res __tmp_reg__ /* result */
+
+#if defined (L_mulqq3)
+ .global __mulqq3
+ .func __mulqq3
+__mulqq3:
+ mov r_res, r_arg1
+ eor r_res, r_arg2
+ bst r_res, 7
+ lsl r_arg1
+ lsl r_arg2
+ brcc __mulqq3_skipneg
+ neg r_arg2
+__mulqq3_skipneg:
+ rcall __muluqq3
+ lsr r_arg1
+ brtc __mulqq3_exit
+ neg r_arg1
+__mulqq3_exit:
+ ret
+
+.endfunc
+#endif /* defined (L_mulqq3) */
+
+#if defined (L_muluqq3)
+ .global __muluqq3
+ .func __muluqq3
+__muluqq3:
+ clr r_res ; clear result
+__muluqq3_loop:
+ lsr r_arg2 ; shift multiplicand
+ sbrc r_arg1,7
+ add r_res,r_arg2
+ breq __muluqq3_exit ; exit if multiplicand = 0
+ lsl r_arg1
+ brne __muluqq3_loop ; exit if multiplier = 0
+__muluqq3_exit:
+ mov r_arg1,r_res ; result to return register
+ ret
+#undef r_arg2
+#undef r_arg1
+#undef r_res
+
+.endfunc
+#endif /* defined (L_muluqq3) */
+#endif /* !defined (__AVR_HAVE_MUL__) */
+
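A C model of the __muluqq3 shift-and-add loop above (a sketch mirroring the register roles; the asm's breq really tests whichever of lsr/add last set Z, which the model simplifies to a plain multiplicand check):

    #include <stdint.h>

    uint8_t muluqq3_model (uint8_t m, uint8_t q)  /* raw UQQ bytes */
    {
      uint8_t res = 0;
      while (m != 0)
        {
          q >>= 1;              /* lsr: halve the multiplicand */
          if (m & 0x80)         /* sbrc: test top multiplier bit */
            res += q;           /* accumulate into the product high byte */
          if (q == 0)           /* breq: multiplicand exhausted */
            break;
          m <<= 1;              /* lsl: move to the next multiplier bit */
        }
      return res;
    }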
+/*******************************************************
+ Fractional Multiplication 16 x 16
+*******************************************************/
+
+#if defined (__AVR_HAVE_MUL__)
+#define r_arg1L r22 /* multiplier Low */
+#define r_arg1H r23 /* multiplier High */
+#define r_arg2L r20 /* multiplicand Low */
+#define r_arg2H r21 /* multiplicand High */
+#define r_resL r18 /* result Low */
+#define r_resH r19 /* result High */
+
+#if defined (L_mulhq3)
+ .global __mulhq3
+ .func __mulhq3
+__mulhq3:
+ fmuls r_arg1H, r_arg2H
+ movw r_resL, r0
+ fmulsu r_arg2H, r_arg1L
+ clr r_arg1L
+ sbc r_resH, r_arg1L
+ add r_resL, r1
+ adc r_resH, r_arg1L
+ fmulsu r_arg1H, r_arg2L
+ sbc r_resH, r_arg1L
+ add r_resL, r1
+ adc r_resH, r_arg1L
+ clr __zero_reg__
+ ret
+.endfunc
+#endif /* defined (L_mulhq3) */
+
+#if defined (L_muluhq3)
+ .global __muluhq3
+ .func __muluhq3
+__muluhq3:
+ mul r_arg1H, r_arg2H
+ movw r_resL, r0
+ mul r_arg1H, r_arg2L
+ add r_resL, r1
+ clr __zero_reg__
+ adc r_resH, __zero_reg__
+ mul r_arg1L, r_arg2H
+ add r_resL, r1
+ clr __zero_reg__
+ adc r_resH, __zero_reg__
+ ret
+.endfunc
+#endif /* defined (L_muluhq3) */
+
+#else
+#define r_arg1L r24 /* multiplier Low */
+#define r_arg1H r25 /* multiplier High */
+#define r_arg2L r22 /* multiplicand Low */
+#define r_arg2H r23 /* multiplicand High */
+#define r_resL __tmp_reg__ /* result Low */
+#define r_resH __zero_reg__ /* result High */
+
+#if defined (L_mulhq3)
+ .global __mulhq3
+ .func __mulhq3
+__mulhq3:
+ mov r_resL, r_arg1H
+ eor r_resL, r_arg2H
+ bst r_resL, 7
+ lsl r_arg1L
+ rol r_arg1H
+ lsl r_arg2L
+ rol r_arg2H
+ brcc mulhq3_skipneg
+ com r_arg2H
+ neg r_arg2L
+ sbci r_arg2H, -1
+mulhq3_skipneg:
+ rcall __muluhq3
+ lsr r_arg1H
+ ror r_arg1L
+ brtc mulhq3_exit
+ com r_arg1H
+ neg r_arg1L
+ sbci r_arg1H, -1
+mulhq3_exit:
+ ret
+.endfunc
+#endif /* defined (L_mulhq3) */
+
+#if defined (L_muluhq3)
+ .global __muluhq3
+ .func __muluhq3
+__muluhq3:
+ clr r_resL ; clear result
+__muluhq3_loop:
+ lsr r_arg2H ; shift multiplicand
+ ror r_arg2L
+ sbrs r_arg1H,7
+ rjmp __muluhq3_skip
+ add r_resL,r_arg2L ; result + multiplicand
+ adc r_resH,r_arg2H
+__muluhq3_skip:
+ lsl r_arg1L ; shift multiplier
+ rol r_arg1H
+ brne __muluhq3_loop
+ cpi r_arg1L, 0
+ brne __muluhq3_loop ; exit if multiplier = 0
+ mov_l r_arg1L,r_resL
+ mov_h r_arg1H,r_resH ; result to return register
+ clr __zero_reg__ ; zero the zero reg
+ ret
+.endfunc
+#endif /* defined (L_muluhq3) */
+
+#endif /* defined (__AVR_HAVE_MUL__) */
+
+#undef r_arg1L
+#undef r_arg1H
+#undef r_arg2L
+#undef r_arg2H
+#undef r_resL
+#undef r_resH
+
+/*******************************************************
+ Fixed Multiplication 8.8 x 8.8
+*******************************************************/
+
+#if defined (__AVR_HAVE_MUL__)
+#define r_arg1L r22 /* multiplier Low */
+#define r_arg1H r23 /* multiplier High */
+#define r_arg2L r20 /* multiplicand Low */
+#define r_arg2H r21 /* multiplicand High */
+#define r_resL r18 /* result Low */
+#define r_resH r19 /* result High */
+
+#if defined (L_mulha3)
+ .global __mulha3
+ .func __mulha3
+__mulha3:
+ mul r_arg1L, r_arg2L
+ mov r_resL, r1
+ muls r_arg1H, r_arg2H
+ mov r_resH, r0
+ mulsu r_arg1H, r_arg2L
+ add r_resL, r0
+ adc r_resH, r1
+ mulsu r_arg2H, r_arg1L
+ add r_resL, r0
+ adc r_resH, r1
+ clr __zero_reg__
+ ret
+.endfunc
+#endif /* defined (L_mulha3) */
+
+#if defined (L_muluha3)
+ .global __muluha3
+ .func __muluha3
+__muluha3:
+ mul r_arg1L, r_arg2L
+ mov r_resL, r1
+ mul r_arg1H, r_arg2H
+ mov r_resH, r0
+ mul r_arg1H, r_arg2L
+ add r_resL, r0
+ adc r_resH, r1
+ mul r_arg1L, r_arg2H
+ add r_resL, r0
+ adc r_resH, r1
+ clr __zero_reg__
+ ret
+.endfunc
+#endif /* defined (L_muluha3) */
+
+#else
+
+#define r_arg1L r24 /* multiplier Low */
+#define r_arg1H r25 /* multiplier High */
+#define r_arg2L r22 /* multiplicand Low */
+#define r_arg2H r23 /* multiplicand High */
+#define r_resL r18 /* result Low */
+#define r_resH r19 /* result High */
+#define r_scratchL r0 /* scratch Low */
+#define r_scratchH r1
+
+#if defined (L_mulha3)
+ .global __mulha3
+ .func __mulha3
+__mulha3:
+ mov r_resL, r_arg1H
+ eor r_resL, r_arg2H
+ bst r_resL, 7
+ sbrs r_arg1H, 7
+ rjmp __mulha3_arg1pos
+ com r_arg1H
+ neg r_arg1L
+ sbci r_arg1H,-1
+__mulha3_arg1pos:
+ sbrs r_arg2H, 7
+ rjmp __mulha3_arg2pos
+ com r_arg2H
+ neg r_arg2L
+ sbci r_arg2H,-1
+__mulha3_arg2pos:
+ rcall __muluha3
+ brtc __mulha3_exit
+ com r_resH
+ neg r_resL
+ sbci r_resH,-1
+__mulha3_exit:
+ ret
+.endfunc
+#endif /* defined (L_mulha3) */
+
+#if defined (L_muluha3)
+ .global __muluha3
+ .func __muluha3
+__muluha3:
+ clr r_resL ; clear result
+ clr r_resH
+	mov_l r_scratchL, r_arg1L ; save multiplicand
+	mov_h r_scratchH, r_arg1H
+__muluha3_loop1:
+ sbrs r_arg2H,0
+ rjmp __muluha3_skip1
+ add r_resL,r_arg1L ; result + multiplicand
+ adc r_resH,r_arg1H
+__muluha3_skip1:
+ lsl r_arg1L ; shift multiplicand
+ rol r_arg1H
+ sbiw r_arg1L,0
+	breq __muluha3_loop1_done ; exit if multiplicand = 0
+	lsr r_arg2H
+	brne __muluha3_loop1	; exit if multiplier = 0
+__muluha3_loop1_done:
+ mov_l r_arg1L, r_scratchL ; restore multiplicand
+ mov_h r_arg1H, r_scratchH
+__muluha3_loop2:
+ lsr r_arg1H ; shift multiplicand
+ ror r_arg1L
+ sbiw r_arg1L,0
+ breq __muluha3_exit ; exit if multiplicand = 0
+ sbrs r_arg2L,7
+ rjmp __muluha3_skip2
+ add r_resL,r_arg1L ; result + multiplicand
+ adc r_resH,r_arg1H
+__muluha3_skip2:
+ lsl r_arg2L
+ brne __muluha3_loop2 ; exit if multiplier = 0
+__muluha3_exit:
+ clr __zero_reg__ ; got clobbered
+ ret
+.endfunc
+#endif /* defined (L_muluha3) */
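+
+/* The two loops above split the (arg1 * arg2) >> 8 product by
+   multiplier byte: loop1 walks the integer byte LSB first while the
+   multiplicand is doubled, loop2 walks the fraction byte MSB first
+   while it is halved, so each multiplier bit k contributes
+   arg1 * 2^(k-8), truncated.  */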
+
+#endif /* defined (__AVR_HAVE_MUL__) */
+
+#undef r_arg1L
+#undef r_arg1H
+#undef r_arg2L
+#undef r_arg2H
+#undef r_resL
+#undef r_resH
+
+/*******************************************************
+ Fixed Multiplication 16.16 x 16.16
+*******************************************************/
+
+#if defined (__AVR_HAVE_MUL__)
+/* Uses nonstandard registers because mulsu only accepts r16-r23.  */
+#define r_clr r15
+
+#define r_arg1L r16 /* multiplier Low */
+#define r_arg1H r17
+#define r_arg1HL r18
+#define r_arg1HH r19 /* multiplier High */
+
+#define r_arg2L r20 /* multiplicand Low */
+#define r_arg2H r21
+#define r_arg2HL r22
+#define r_arg2HH r23 /* multiplicand High */
+
+#define r_resL r24 /* result Low */
+#define r_resH r25
+#define r_resHL r26
+#define r_resHH r27 /* result High */
+
+#if defined (L_mulsa3)
+ .global __mulsa3
+ .func __mulsa3
+__mulsa3:
+ clr r_clr
+ clr r_resH
+ clr r_resHL
+ clr r_resHH
+ mul r_arg1H, r_arg2L
+ mov r_resL, r1
+ mul r_arg1L, r_arg2H
+ add r_resL, r1
+ adc r_resH, r_clr
+ mul r_arg1L, r_arg2HL
+ add r_resL, r0
+ adc r_resH, r1
+ adc r_resHL, r_clr
+ mul r_arg1H, r_arg2H
+ add r_resL, r0
+ adc r_resH, r1
+ adc r_resHL, r_clr
+ mul r_arg1HL, r_arg2L
+ add r_resL, r0
+ adc r_resH, r1
+ adc r_resHL, r_clr
+ mulsu r_arg2HH, r_arg1L
+ sbc r_resHH, r_clr
+ add r_resH, r0
+ adc r_resHL, r1
+ adc r_resHH, r_clr
+ mul r_arg1H, r_arg2HL
+ add r_resH, r0
+ adc r_resHL, r1
+ adc r_resHH, r_clr
+ mul r_arg1HL, r_arg2H
+ add r_resH, r0
+ adc r_resHL, r1
+ adc r_resHH, r_clr
+ mulsu r_arg1HH, r_arg2L
+ sbc r_resHH, r_clr
+ add r_resH, r0
+ adc r_resHL, r1
+ adc r_resHH, r_clr
+ mulsu r_arg2HH, r_arg1H
+ add r_resHL, r0
+ adc r_resHH, r1
+ mul r_arg1HL, r_arg2HL
+ add r_resHL, r0
+ adc r_resHH, r1
+ mulsu r_arg1HH, r_arg2H
+ add r_resHL, r0
+ adc r_resHH, r1
+ mulsu r_arg2HH, r_arg1HL
+ add r_resHH, r0
+ mulsu r_arg1HH, r_arg2HL
+ add r_resHH, r0
+ clr __zero_reg__
+ ret
+.endfunc
+#endif /* defined (L_mulsa3) */
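+
+/* __mulsa3 accumulates the middle 32 bits of the 64-bit byte product,
+   i.e. (arg1 * arg2) >> 16.  r15 stands in as a zero register for the
+   adc chains because mul clobbers r1.  mulsu sets C when its signed
+   result is negative, so the sbc against r_clr after the low-offset
+   mulsu partial products sign-extends them into the top result
+   byte.  */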
+
+#if defined (L_mulusa3)
+ .global __mulusa3
+ .func __mulusa3
+__mulusa3:
+ clr r_clr
+ clr r_resH
+ clr r_resHL
+ clr r_resHH
+ mul r_arg1H, r_arg2L
+ mov r_resL, r1
+ mul r_arg1L, r_arg2H
+ add r_resL, r1
+ adc r_resH, r_clr
+ mul r_arg1L, r_arg2HL
+ add r_resL, r0
+ adc r_resH, r1
+ adc r_resHL, r_clr
+ mul r_arg1H, r_arg2H
+ add r_resL, r0
+ adc r_resH, r1
+ adc r_resHL, r_clr
+ mul r_arg1HL, r_arg2L
+ add r_resL, r0
+ adc r_resH, r1
+ adc r_resHL, r_clr
+ mul r_arg1L, r_arg2HH
+ add r_resH, r0
+ adc r_resHL, r1
+ adc r_resHH, r_clr
+ mul r_arg1H, r_arg2HL
+ add r_resH, r0
+ adc r_resHL, r1
+ adc r_resHH, r_clr
+ mul r_arg1HL, r_arg2H
+ add r_resH, r0
+ adc r_resHL, r1
+ adc r_resHH, r_clr
+ mul r_arg1HH, r_arg2L
+ add r_resH, r0
+ adc r_resHL, r1
+ adc r_resHH, r_clr
+ mul r_arg1H, r_arg2HH
+ add r_resHL, r0
+ adc r_resHH, r1
+ mul r_arg1HL, r_arg2HL
+ add r_resHL, r0
+ adc r_resHH, r1
+ mul r_arg1HH, r_arg2H
+ add r_resHL, r0
+ adc r_resHH, r1
+ mul r_arg1HL, r_arg2HH
+ add r_resHH, r0
+ mul r_arg1HH, r_arg2HL
+ add r_resHH, r0
+ clr __zero_reg__
+ ret
+.endfunc
+#endif /* defined (L_mulusa3) */
+
+#else
+
+#define r_arg1L r18 /* multiplier Low */
+#define r_arg1H r19
+#define r_arg1HL r20
+#define r_arg1HH r21 /* multiplier High */
+
+/* These registers are needed for sbiw, which only works on the
+   register pairs r25:r24, r27:r26, r29:r28 and r31:r30.  */
+#define r_arg2L r24 /* multiplicand Low */
+#define r_arg2H r25
+#define r_arg2HL r26
+#define r_arg2HH r27 /* multiplicand High */
+
+#define r_resL r14 /* result Low */
+#define r_resH r15
+#define r_resHL r16
+#define r_resHH r17 /* result High */
+
+#define r_scratchL r0 /* scratch Low */
+#define r_scratchH r1
+#define r_scratchHL r22
+#define r_scratchHH r23 /* scratch High */
+
+#if defined (L_mulsa3)
+ .global __mulsa3
+ .func __mulsa3
+__mulsa3:
+ mov r_resL, r_arg1HH
+ eor r_resL, r_arg2HH
+ bst r_resL, 7
+ sbrs r_arg1HH, 7
+ rjmp __mulsa3_arg1pos
+ com r_arg1HH
+ com r_arg1HL
+ com r_arg1H
+ neg r_arg1L
+ sbci r_arg1H,-1
+ sbci r_arg1HL,-1
+ sbci r_arg1HH,-1
+__mulsa3_arg1pos:
+ sbrs r_arg2HH, 7
+ rjmp __mulsa3_arg2pos
+ com r_arg2HH
+ com r_arg2HL
+ com r_arg2H
+ neg r_arg2L
+ sbci r_arg2H,-1
+ sbci r_arg2HL,-1
+ sbci r_arg2HH,-1
+__mulsa3_arg2pos:
+ rcall __mulusa3
+ brtc __mulsa3_exit
+ com r_resHH
+ com r_resHL
+ com r_resH
+ com r_resL
+ adc r_resL,__zero_reg__
+ adc r_resH,__zero_reg__
+ adc r_resHL,__zero_reg__
+ adc r_resHH,__zero_reg__
+__mulsa3_exit:
+ ret
+.endfunc
+#endif /* defined (L_mulsa3) */
+
+#if defined (L_mulusa3)
+ .global __mulusa3
+ .func __mulusa3
+__mulusa3:
+ clr r_resL ; clear result
+ clr r_resH
+ mov_l r_resHL, r_resL
+ mov_h r_resHH, r_resH
+ mov_l r_scratchL, r_arg1L ; save multiplicand
+ mov_h r_scratchH, r_arg1H
+ mov_l r_scratchHL, r_arg1HL
+ mov_h r_scratchHH, r_arg1HH
+__mulusa3_loop1:
+ sbrs r_arg2HL,0
+ rjmp __mulusa3_skip1
+ add r_resL,r_arg1L ; result + multiplicand
+ adc r_resH,r_arg1H
+ adc r_resHL,r_arg1HL
+ adc r_resHH,r_arg1HH
+__mulusa3_skip1:
+ lsl r_arg1L ; shift multiplicand
+ rol r_arg1H
+ rol r_arg1HL
+ rol r_arg1HH
+ lsr r_arg2HH
+ ror r_arg2HL
+ sbiw r_arg2HL,0
+	brne __mulusa3_loop1	; exit if multiplier = 0
+__mulusa3_loop1_done:
+ mov_l r_arg1L, r_scratchL ; restore multiplicand
+ mov_h r_arg1H, r_scratchH
+ mov_l r_arg1HL, r_scratchHL
+ mov_h r_arg1HH, r_scratchHH
+__mulusa3_loop2:
+ lsr r_arg1HH ; shift multiplicand
+ ror r_arg1HL
+ ror r_arg1H
+ ror r_arg1L
+ sbrs r_arg2H,7
+ rjmp __mulusa3_skip2
+ add r_resL,r_arg1L ; result + multiplicand
+ adc r_resH,r_arg1H
+ adc r_resHL,r_arg1HL
+ adc r_resHH,r_arg1HH
+__mulusa3_skip2:
+ lsl r_arg2L
+ rol r_arg2H
+ sbiw r_arg2L,0
+ brne __mulusa3_loop2 ; exit if multiplier = 0
+__mulusa3_exit:
+ clr __zero_reg__ ; got clobbered
+ ret
+.endfunc
+#endif /* defined (L_mulusa3) */
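+
+/* Same double-loop scheme as __muluha3, widened to 32 bits: loop1
+   covers the 16 integer bits of the multiplier with a left-shifting
+   multiplicand, loop2 the 16 fraction bits with a right-shifting
+   one, accumulating the truncated (arg1 * arg2) >> 16.  */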
+
+#undef r_scratchL
+#undef r_scratchH
+#undef r_scratchHL
+#undef r_scratchHH
+
+#endif /* defined (__AVR_HAVE_MUL__) */
+
+#undef r_arg1L
+#undef r_arg1H
+#undef r_arg1HL
+#undef r_arg1HH
+
+#undef r_arg2L
+#undef r_arg2H
+#undef r_arg2HL
+#undef r_arg2HH
+
+#undef r_resL
+#undef r_resH
+#undef r_resHL
+#undef r_resHH
+
+/*******************************************************
+ Fractional Division 8 / 8
+*******************************************************/
+#define r_divd r25 /* dividend */
+#define r_quo r24 /* quotient */
+#define r_div r22 /* divisor */
+#define r_cnt r23 /* loop count */
+
+#if defined (L_divqq3)
+ .global __divqq3
+ .func __divqq3
+__divqq3:
+ mov r0, r_divd
+ eor r0, r_div
+ sbrc r_div, 7
+ neg r_div
+ sbrc r_divd, 7
+ neg r_divd
+ cp r_divd, r_div
+ breq __divqq3_minus1 ; if equal return -1
+ rcall __udivuqq3
+ lsr r_quo
+ sbrc r0, 7 ; negate result if needed
+ neg r_quo
+ ret
+__divqq3_minus1:
+ ldi r_quo, 0x80
+ ret
+.endfunc
+#endif /* defined (L_divqq3) */
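+
+/* Q0.7 division assumes |dividend| < |divisor|; the quotient of two
+   equal magnitudes would be +/-1.0, which Q0.7 cannot represent, so
+   that case is caught up front and mapped to 0x80.  */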
+
+#if defined (L_udivuqq3)
+ .global __udivuqq3
+ .func __udivuqq3
+__udivuqq3:
+ clr r_quo ; clear quotient
+ ldi r_cnt,8 ; init loop counter
+__udivuqq3_loop:
+ lsl r_divd ; shift dividend
+ brcs __udivuqq3_ep ; dividend overflow
+ cp r_divd,r_div ; compare dividend & divisor
+ brcc __udivuqq3_ep ; dividend >= divisor
+ rol r_quo ; shift quotient (with CARRY)
+ rjmp __udivuqq3_cont
+__udivuqq3_ep:
+ sub r_divd,r_div ; restore dividend
+ lsl r_quo ; shift quotient (without CARRY)
+__udivuqq3_cont:
+ dec r_cnt ; decrement loop counter
+ brne __udivuqq3_loop
+ com r_quo ; complement result
+ ; because C flag was complemented in loop
+ ret
+.endfunc
+#endif /* defined (L_udivuqq3) */
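+
+/* The loop above is a bitwise shift-and-subtract division producing
+   one quotient bit per step, except that the bits are collected
+   inverted (a successful subtract shifts in a 0) and corrected by
+   the final com.  For example, 0.25 / 0.5 in Q0.8:
+   0x40 / 0x80 yields 0x80 = 0.5.  */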
+
+#undef r_divd
+#undef r_quo
+#undef r_div
+#undef r_cnt
+
+
+/*******************************************************
+ Fractional Division 16 / 16
+*******************************************************/
+#define r_divdL r26 /* dividend Low */
+#define r_divdH r27 /* dividend High */
+#define r_quoL r24 /* quotient Low */
+#define r_quoH r25 /* quotient High */
+#define r_divL r22 /* divisor Low */
+#define r_divH r23 /* divisor High */
+#define r_cnt r21 /* loop count */
+
+#if defined (L_divhq3)
+ .global __divhq3
+ .func __divhq3
+__divhq3:
+ mov r0, r_divdH
+ eor r0, r_divH
+ sbrs r_divH, 7
+ rjmp __divhq3_divpos
+ com r_divH
+ neg r_divL
+ sbci r_divH,-1
+__divhq3_divpos:
+ sbrs r_divdH, 7
+ rjmp __divhq3_divdpos
+ com r_divdH
+ neg r_divdL
+ sbci r_divdH,-1
+__divhq3_divdpos:
+ cp r_divdL, r_divL
+ cpc r_divdH, r_divH
+ breq __divhq3_minus1 ; if equal return -1
+ rcall __udivuhq3
+ lsr r_quoH
+ ror r_quoL
+ sbrs r0, 7 ; negate result if needed
+ ret
+ com r_quoH
+ neg r_quoL
+ sbci r_quoH,-1
+ ret
+__divhq3_minus1:
+ ldi r_quoH, 0x80
+ clr r_quoL
+ ret
+.endfunc
+#endif /* defined (L_divhq3) */
+
+#if defined (L_udivuhq3)
+ .global __udivuhq3
+ .func __udivuhq3
+__udivuhq3:
+ sub r_quoH,r_quoH ; clear quotient and carry
+ .global __udivuha3_entry
+__udivuha3_entry:
+ clr r_quoL ; clear quotient
+ ldi r_cnt,16 ; init loop counter
+__udivuhq3_loop:
+ rol r_divdL ; shift dividend (with CARRY)
+ rol r_divdH
+ brcs __udivuhq3_ep ; dividend overflow
+ cp r_divdL,r_divL ; compare dividend & divisor
+ cpc r_divdH,r_divH
+ brcc __udivuhq3_ep ; dividend >= divisor
+ rol r_quoL ; shift quotient (with CARRY)
+ rjmp __udivuhq3_cont
+__udivuhq3_ep:
+ sub r_divdL,r_divL ; restore dividend
+ sbc r_divdH,r_divH
+ lsl r_quoL ; shift quotient (without CARRY)
+__udivuhq3_cont:
+ rol r_quoH ; shift quotient
+ dec r_cnt ; decrement loop counter
+ brne __udivuhq3_loop
+ com r_quoL ; complement result
+ com r_quoH ; because C flag was complemented in loop
+ ret
+.endfunc
+#endif /* defined (L_udivuhq3) */
+
+/*******************************************************
+ Fixed Division 8.8 / 8.8
+*******************************************************/
+#if defined (L_divha3)
+ .global __divha3
+ .func __divha3
+__divha3:
+ mov r0, r_divdH
+ eor r0, r_divH
+ sbrs r_divH, 7
+ rjmp __divha3_divpos
+ com r_divH
+ neg r_divL
+ sbci r_divH,-1
+__divha3_divpos:
+ sbrs r_divdH, 7
+ rjmp __divha3_divdpos
+ com r_divdH
+ neg r_divdL
+ sbci r_divdH,-1
+__divha3_divdpos:
+ rcall __udivuha3
+ sbrs r0, 7 ; negate result if needed
+ ret
+ com r_quoH
+ neg r_quoL
+ sbci r_quoH,-1
+ ret
+.endfunc
+#endif /* defined (L_divha3) */
+
+#if defined (L_udivuha3)
+ .global __udivuha3
+ .func __udivuha3
+__udivuha3:
+ mov r_quoH, r_divdL
+ mov r_divdL, r_divdH
+ clr r_divdH
+ lsl r_quoH ; shift quotient into carry
+	rjmp __udivuha3_entry	; same as the fractional case after rearranging
+.endfunc
+#endif /* defined (L_udivuha3) */
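+
+/* For 8.8 division the quotient is (dividend << 8) / divisor; the
+   byte moves above realize that pre-shift, staging the low dividend
+   byte in r_quoH, from where it streams into the divide loop through
+   carry as quotient bits displace it.  */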
+
+#undef r_divdL
+#undef r_divdH
+#undef r_quoL
+#undef r_quoH
+#undef r_divL
+#undef r_divH
+#undef r_cnt
+
+/*******************************************************
+ Fixed Division 16.16 / 16.16
+*******************************************************/
+#define r_arg1L r24 /* arg1 is passed in place */
+#define r_arg1H r25
+#define r_arg1HL r26
+#define r_arg1HH r27
+#define r_divdL r26 /* dividend Low */
+#define r_divdH r27
+#define r_divdHL r30
+#define r_divdHH r31 /* dividend High */
+#define r_quoL r22 /* quotient Low */
+#define r_quoH r23
+#define r_quoHL r24
+#define r_quoHH r25 /* quotient High */
+#define r_divL r18 /* divisor Low */
+#define r_divH r19
+#define r_divHL r20
+#define r_divHH r21 /* divisor High */
+#define r_cnt __zero_reg__ /* loop count (0 after the loop!) */
+
+#if defined (L_divsa3)
+ .global __divsa3
+ .func __divsa3
+__divsa3:
+	mov r0, r_arg1HH
+ eor r0, r_divHH
+ sbrs r_divHH, 7
+ rjmp __divsa3_divpos
+ com r_divHH
+ com r_divHL
+ com r_divH
+ neg r_divL
+ sbci r_divH,-1
+ sbci r_divHL,-1
+ sbci r_divHH,-1
+__divsa3_divpos:
+ sbrs r_arg1HH, 7
+ rjmp __divsa3_arg1pos
+ com r_arg1HH
+ com r_arg1HL
+ com r_arg1H
+ neg r_arg1L
+ sbci r_arg1H,-1
+ sbci r_arg1HL,-1
+ sbci r_arg1HH,-1
+__divsa3_arg1pos:
+ rcall __udivusa3
+ sbrs r0, 7 ; negate result if needed
+ ret
+ com r_quoHH
+ com r_quoHL
+ com r_quoH
+ neg r_quoL
+ sbci r_quoH,-1
+ sbci r_quoHL,-1
+ sbci r_quoHH,-1
+ ret
+.endfunc
+#endif /* defined (L_divsa3) */
+
+#if defined (L_udivusa3)
+ .global __udivusa3
+ .func __udivusa3
+__udivusa3:
+ ldi r_divdHL, 32 ; init loop counter
+ mov r_cnt, r_divdHL
+ clr r_divdHL
+ clr r_divdHH
+ mov_l r_quoL, r_divdHL
+ mov_h r_quoH, r_divdHH
+ lsl r_quoHL ; shift quotient into carry
+ rol r_quoHH
+__udivusa3_loop:
+ rol r_divdL ; shift dividend (with CARRY)
+ rol r_divdH
+ rol r_divdHL
+ rol r_divdHH
+ brcs __udivusa3_ep ; dividend overflow
+ cp r_divdL,r_divL ; compare dividend & divisor
+ cpc r_divdH,r_divH
+ cpc r_divdHL,r_divHL
+ cpc r_divdHH,r_divHH
+ brcc __udivusa3_ep ; dividend >= divisor
+ rol r_quoL ; shift quotient (with CARRY)
+ rjmp __udivusa3_cont
+__udivusa3_ep:
+ sub r_divdL,r_divL ; restore dividend
+ sbc r_divdH,r_divH
+ sbc r_divdHL,r_divHL
+ sbc r_divdHH,r_divHH
+ lsl r_quoL ; shift quotient (without CARRY)
+__udivusa3_cont:
+ rol r_quoH ; shift quotient
+ rol r_quoHL
+ rol r_quoHH
+ dec r_cnt ; decrement loop counter
+ brne __udivusa3_loop
+ com r_quoL ; complement result
+ com r_quoH ; because C flag was complemented in loop
+ com r_quoHL
+ com r_quoHH
+ ret
+.endfunc
+#endif /* defined (L_udivusa3) */
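+
+/* Same staging trick as __udivuha3, scaled up: the 16-bit accum
+   pre-shift is realized purely by register placement, with the low
+   half of the dividend parked in r_quoHL:r_quoHH and fed into the
+   loop through carry.  The counter lives in __zero_reg__, which is
+   conveniently 0 again when the loop ends; r_divdHL is borrowed to
+   load the count because ldi cannot target r1.  */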
+
+#undef r_divdL
+#undef r_divdH
+#undef r_divdHL
+#undef r_divdHH
+#undef r_quoL
+#undef r_quoH
+#undef r_quoHL
+#undef r_quoHH
+#undef r_divL
+#undef r_divH
+#undef r_divHL
+#undef r_divHH
+#undef r_cnt
diff -rupN gcc/config/avr/libgcc.S gcc/config/avr/libgcc.S
--- gcc/config/avr/libgcc.S 2009-05-23 02:16:07.000000000 -0500
+++ gcc/config/avr/libgcc.S 2010-09-21 14:31:30.000000000 -0500
@@ -162,6 +162,23 @@ __mulhi3_exit:
.global __mulhisi3
.func __mulhisi3
__mulhisi3:
+#if defined (__AVR_HAVE_MUL__)
+ muls r21, r19
+ movw r24, r0
+ mul r20, r18
+ movw r22, r0
+ mulsu r21, r18
+ add r23, r0
+ adc r24, r1
+ clr r1
+ adc r25, r1
+ mulsu r19, r20
+ add r23, r0
+ adc r24, r1
+ clr r1
+ adc r25, r1
+ ret
+#else
mov_l r18, r24
mov_h r19, r25
clr r24
@@ -173,6 +190,7 @@ __mulhisi3:
dec r20
mov r21, r20
rjmp __mulsi3
+#endif /* defined (__AVR_HAVE_MUL__) */
.endfunc
#endif /* defined (L_mulhisi3) */
@@ -180,13 +198,31 @@ __mulhisi3:
.global __umulhisi3
.func __umulhisi3
__umulhisi3:
- mov_l r18, r24
- mov_h r19, r25
+#if defined (__AVR_HAVE_MUL__)
+ mul r21, r19
+ movw r24, r0
+ mul r20, r18
+ movw r22, r0
+ mul r21, r18
+ add r23, r0
+ adc r24, r1
+ clr r1
+ adc r25, r1
+ mul r19, r20
+ add r23, r0
+ adc r24, r1
+ clr r1
+ adc r25, r1
+ ret
+#else
+ mov_l r22, r20
+ mov_h r23, r21
clr r24
clr r25
clr r20
clr r21
rjmp __mulsi3
+#endif /* defined (__AVR_HAVE_MUL__) */
.endfunc
#endif /* defined (L_umulhisi3) */
@@ -199,7 +235,6 @@ __umulhisi3:
#define r_arg1HL r24
#define r_arg1HH r25 /* multiplier High */
-
#define r_arg2L r18 /* multiplicand Low */
#define r_arg2H r19
#define r_arg2HL r20
@@ -555,6 +590,23 @@ __divmodsi4_neg1:
.endfunc
#endif /* defined (L_divmodsi4) */
+#undef r_remHH
+#undef r_remHL
+#undef r_remH
+#undef r_remL
+
+#undef r_arg1HH
+#undef r_arg1HL
+#undef r_arg1H
+#undef r_arg1L
+
+#undef r_arg2HH
+#undef r_arg2HL
+#undef r_arg2H
+#undef r_arg2L
+
+#undef r_cnt
+
/**********************************
* This is a prologue subroutine
**********************************/
@@ -897,3 +949,4 @@ __tablejump_elpm__:
.endfunc
#endif /* defined (L_tablejump_elpm) */
+#include "libgcc-fixed.S"
diff -rupN gcc/config/avr/t-avr gcc/config/avr/t-avr
--- gcc/config/avr/t-avr 2009-12-24 14:32:38.000000000 -0600
+++ gcc/config/avr/t-avr 2010-09-21 14:31:30.000000000 -0500
@@ -36,6 +36,8 @@ LIB1ASMSRC = avr/libgcc.S
LIB1ASMFUNCS = \
_mulqi3 \
_mulhi3 \
+ _mulhisi3 \
+ _umulhisi3 \
_mulsi3 \
_udivmodqi4 \
_divmodqi4 \
@@ -54,6 +56,39 @@ LIB1ASMFUNCS = \
_ctors \
_dtors
+# Fixed point routines
+LIB1ASMFUNCS += \
+ _fractqqsf \
+ _fractuqqsf \
+ _fracthqsf \
+ _fractuhqsf \
+ _fracthasf \
+ _fractuhasf \
+ _fractsasf \
+ _fractusasf \
+ _fractsfqq \
+ _fractsfuqq \
+ _fractsfhq \
+ _fractsfuhq \
+ _fractsfha \
+ _fractsfsa \
+ _mulqq3 \
+ _muluqq3 \
+ _mulhq3 \
+ _muluhq3 \
+ _mulha3 \
+ _muluha3 \
+ _mulsa3 \
+ _mulusa3 \
+ _divqq3 \
+ _udivuqq3 \
+ _divhq3 \
+ _udivuhq3 \
+ _divha3 \
+ _udivuha3 \
+ _divsa3 \
+ _udivusa3
+
# We do not have the DF type.
# Most of the C functions in libgcc2 use almost all registers,
# so use -mcall-prologues for smaller code size.
diff -rupN gcc/cse.c gcc/cse.c
--- gcc/cse.c 2010-01-12 14:25:10.000000000 -0600
+++ gcc/cse.c 2010-09-21 14:31:30.000000000 -0500
@@ -3703,9 +3703,10 @@ fold_rtx (rtx x, rtx insn)
&& exact_log2 (- INTVAL (const_arg1)) >= 0)))
break;
- /* ??? Vector mode shifts by scalar
+      /* ??? Vector and fixed-point shifts by scalar
shift operand are not supported yet. */
- if (is_shift && VECTOR_MODE_P (mode))
+ if (is_shift && (VECTOR_MODE_P (mode)
+ || ALL_FIXED_POINT_MODE_P (mode)))
break;
if (is_shift
diff -rupN gcc/dwarf2out.c gcc/dwarf2out.c
--- gcc/dwarf2out.c 2010-07-01 07:31:19.000000000 -0500
+++ gcc/dwarf2out.c 2010-09-21 14:35:35.000000000 -0500
@@ -12017,6 +12017,12 @@ base_type_die (tree type)
add_AT_unsigned (base_type_result, DW_AT_byte_size,
int_size_in_bytes (type));
+
+  /* DWARF 3 specifies that for fixed-point types DW_AT_binary_scale
+     gives the binary scale factor, i.e. the position of the binary
+     point.  */
+ if (TREE_CODE (type) == FIXED_POINT_TYPE)
+ add_AT_int (base_type_result, DW_AT_binary_scale, -TYPE_FBIT (type));
+
add_AT_unsigned (base_type_result, DW_AT_encoding, encoding);
return base_type_result;
@@ -15482,7 +15488,11 @@ add_const_value_attribute (dw_die_ref di
case HIGH:
case CONST_FIXED:
- return false;
+ {
+ add_AT_double (die, DW_AT_const_value,
+ CONST_FIXED_VALUE_HIGH (rtl), CONST_FIXED_VALUE_LOW (rtl));
+ }
+ break;
case MEM:
if (GET_CODE (XEXP (rtl, 0)) == CONST_STRING
diff -rupN gcc/fold-const.c gcc/fold-const.c
--- gcc/fold-const.c 2010-04-06 05:36:57.000000000 -0500
+++ gcc/fold-const.c 2010-09-21 14:31:30.000000000 -0500
@@ -12305,6 +12305,11 @@ fold_binary_loc (location_t loc,
if (TREE_CODE (arg1) == INTEGER_CST && tree_int_cst_sgn (arg1) < 0)
return NULL_TREE;
+  /* Fixed-point types do not support bitwise and, or, etc., so
+     don't try to convert to an expression with them.  */
+  if (TREE_CODE (type) == FIXED_POINT_TYPE)
+ return NULL_TREE;
+
/* Turn (a OP c1) OP c2 into a OP (c1+c2). */
if (TREE_CODE (op0) == code && host_integerp (arg1, false)
&& TREE_INT_CST_LOW (arg1) < TYPE_PRECISION (type)
diff -rupN gcc/varasm.c gcc/varasm.c
--- gcc/varasm.c 2010-03-27 06:56:30.000000000 -0500
+++ gcc/varasm.c 2010-09-21 14:31:30.000000000 -0500
@@ -2709,7 +2709,7 @@ assemble_integer (rtx x, unsigned int si
else
mclass = MODE_INT;
- omode = mode_for_size (subsize * BITS_PER_UNIT, mclass, 0);
+ omode = mode_for_size (subsize * BITS_PER_UNIT, MODE_INT, 0);
imode = mode_for_size (size * BITS_PER_UNIT, mclass, 0);
for (i = 0; i < size; i += subsize)