#include "macros.inc"
#include "fpu.h"

test_suite fp0_arith

#if XCHAL_HAVE_DFP

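/*
 * Load the 64-bit constant \v into FP register \fr: the high word is
 * placed in a2, the low word in a3, and wfrd combines them.
 */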
.macro movfp fr, v
    movi    a2, ((\v) >> 32) & 0xffffffff
    movi    a3, ((\v) & 0xffffffff)
    wfrd    \fr, a2, a3
.endm

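/*
 * Check the double result in \fr against the 64-bit value \r: rfrd reads
 * back the high 32 bits and rfr the low 32 bits.  Also check that FSR
 * holds the expected exception flags \sr.
 */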
.macro check_res fr, r, sr
    rfrd    a2, \fr
    dump    a2
    movi    a3, ((\r) >> 32) & 0xffffffff
    assert  eq, a2, a3
    rfr     a2, \fr
    dump    a2
    movi    a3, ((\r) & 0xffffffff)
    assert  eq, a2, a3
    rur     a2, fsr
    movi    a3, \sr
    assert  eq, a2, a3
.endm

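/*
 * test_op2/test_op3 come from the included headers.  Each invocation lists
 * the operation, the FP registers to use and the input value(s), followed
 * by four expected results and four expected FSR flag sets: one column per
 * rounding mode, apparently in the order nearest-even, toward zero,
 * toward +inf, toward -inf (see the overflow and underflow cases below).
 */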
test add_d
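    /* Enable the FPU coprocessor (CPENABLE bit 0) before the first FP instruction */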
    movi    a2, 1
    wsr     a2, cpenable

    /* MAX_FLOAT + MAX_FLOAT = +inf/MAX_FLOAT  */
    test_op2 add.d, f6, f7, f8, F64_MAX, F64_MAX, \
        F64_PINF, F64_MAX, F64_PINF, F64_MAX, \
          FSR_OI,  FSR_OI,   FSR_OI,  FSR_OI
test_end

test add_d_inf
    /* 1 + +inf = +inf  */
    test_op2 add.d, f6, f7, f8, F64_1, F64_PINF, \
        F64_PINF, F64_PINF, F64_PINF, F64_PINF, \
           FSR__,    FSR__,    FSR__,    FSR__

    /* +inf + -inf = default NaN */
    test_op2 add.d, f0, f1, f2, F64_PINF, F64_NINF, \
        F64_DNAN, F64_DNAN, F64_DNAN, F64_DNAN, \
           FSR_V,    FSR_V,    FSR_V,    FSR_V
test_end

test add_d_nan_dfpu
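    /* DFPU add.d propagates the first operand's NaN; SNaN inputs are
       quietened in the result and set the Invalid flag  */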
    /* 1 + QNaN = QNaN  */
    test_op2 add.d, f9, f10, f11, F64_1, F64_QNAN(1), \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR__,       FSR__,       FSR__,       FSR__
    /* 1 + SNaN = QNaN  */
    test_op2 add.d, f12, f13, f14, F64_1, F64_SNAN(1), \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR_V,       FSR_V,       FSR_V,       FSR_V

    /* SNaN1 + SNaN2 = QNaN1 */
    test_op2 add.d, f15, f0, f1, F64_SNAN(1), F64_SNAN(2), \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR_V,       FSR_V,       FSR_V,       FSR_V
    /* QNaN1 + SNaN2 = QNaN1 */
    test_op2 add.d, f5, f6, f7, F64_QNAN(1), F64_SNAN(2), \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR_V,       FSR_V,       FSR_V,       FSR_V
    /* SNaN1 + QNaN2 = QNaN1 */
    test_op2 add.d, f8, f9, f10, F64_SNAN(1), F64_QNAN(2), \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR_V,       FSR_V,       FSR_V,       FSR_V
test_end

test sub_d
    /* norm - norm = denorm */
    test_op2 sub.d, f6, f7, f8, F64_MIN_NORM | 1, F64_MIN_NORM, \
        0x00000001, 0x00000001, 0x00000001, 0x00000001, \
             FSR__,      FSR__,      FSR__,      FSR__
test_end

test mul_d
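    /* (1 + ulp) * (1 + ulp): the exact product 1 + 2ulp + ulp^2 is inexact;
       it rounds to 1 + 2ulp except toward +inf, where it becomes 1 + 3ulp  */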
    test_op2 mul.d, f0, f1, f2, F64_1 | 1, F64_1 | 1, \
        F64_1 | 2, F64_1 | 2, F64_1 | 3, F64_1 | 2, \
            FSR_I,     FSR_I,     FSR_I,     FSR_I
    /* MAX_FLOAT/2 * MAX_FLOAT/2 = +inf/MAX_FLOAT  */
    test_op2 mul.d, f6, f7, f8, F64_MAX_2, F64_MAX_2, \
        F64_PINF, F64_MAX, F64_PINF, F64_MAX, \
          FSR_OI,  FSR_OI,   FSR_OI,  FSR_OI
    /* min norm * min norm = 0/denorm */
    test_op2 mul.d, f6, f7, f8, F64_MIN_NORM, F64_MIN_NORM, \
         F64_0,  F64_0, 0x00000001,  F64_0, \
        FSR_UI, FSR_UI,     FSR_UI, FSR_UI
    /* inf * 0 = default NaN */
    test_op2 mul.d, f6, f7, f8, F64_PINF, F64_0, \
        F64_DNAN, F64_DNAN, F64_DNAN, F64_DNAN, \
           FSR_V,    FSR_V,    FSR_V,    FSR_V
test_end

test madd_d
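    /* 0 + (1 + ulp) * (1 + ulp): same rounding behaviour as the mul.d case
       above, exercised through the fused multiply-add  */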
    test_op3 madd.d, f0, f1, f2, f0, F64_0, F64_1 | 1, F64_1 | 1, \
        F64_1 | 2, F64_1 | 2, F64_1 | 3, F64_1 | 2, \
            FSR_I,     FSR_I,     FSR_I,     FSR_I
test_end

test madd_d_precision
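    /* -(1 + 2ulp) + (1 + ulp) * (1 + ulp) = ulp^2 = 2^-104, i.e.
       0x3970000000000000.  The exact ulp^2 term survives only because the
       product is not rounded before the addition  */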
    test_op3 madd.d, f0, f1, f2, f0, \
        F64_MINUS | F64_1 | 2, F64_1 | 1, F64_1 | 1, \
        0x3970000000000000, 0x3970000000000000, 0x3970000000000000, 0x3970000000000000, \
             FSR__,      FSR__,      FSR__,      FSR__
test_end

test madd_d_nan_dfpu
    /* DFPU madd/msub NaN1, NaN2, NaN3 priority: NaN1, NaN3, NaN2 */
    test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_1, F64_1, \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR__,       FSR__,       FSR__,       FSR__
    test_op3 madd.d, f0, f1, f2, f0, F64_1, F64_QNAN(2), F64_1, \
        F64_QNAN(2), F64_QNAN(2), F64_QNAN(2), F64_QNAN(2), \
              FSR__,       FSR__,       FSR__,       FSR__
    test_op3 madd.d, f0, f1, f2, f0, F64_1, F64_1, F64_QNAN(3), \
        F64_QNAN(3), F64_QNAN(3), F64_QNAN(3), F64_QNAN(3), \
              FSR__,       FSR__,       FSR__,       FSR__

    test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_QNAN(2), F64_1, \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR__,       FSR__,       FSR__,       FSR__
    test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_1, F64_QNAN(3), \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR__,       FSR__,       FSR__,       FSR__
    test_op3 madd.d, f0, f1, f2, f0, F64_1, F64_QNAN(2), F64_QNAN(3), \
        F64_QNAN(3), F64_QNAN(3), F64_QNAN(3), F64_QNAN(3), \
              FSR__,       FSR__,       FSR__,       FSR__

    test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_QNAN(2), F64_QNAN(3), \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR__,       FSR__,       FSR__,       FSR__

    /* inf * 0 = default NaN */
    test_op3 madd.d, f0, f1, f2, f0, F64_1, F64_PINF, F64_0, \
        F64_DNAN, F64_DNAN, F64_DNAN, F64_DNAN, \
           FSR_V,    FSR_V,    FSR_V,    FSR_V
    /* inf * 0 + SNaN1 = QNaN1 */
    test_op3 madd.d, f0, f1, f2, f0, F64_SNAN(1), F64_PINF, F64_0, \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR_V,       FSR_V,       FSR_V,       FSR_V
    /* inf * 0 + QNaN1 = QNaN1 */
    test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_PINF, F64_0, \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR_V,       FSR_V,       FSR_V,       FSR_V

    /* madd/msub SNaN turns to QNaN and sets Invalid flag */
    test_op3 madd.d, f0, f1, f2, f0, F64_SNAN(1), F64_1, F64_1, \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR_V,       FSR_V,       FSR_V,       FSR_V
    test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_SNAN(2), F64_1, \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
              FSR_V,       FSR_V,       FSR_V,       FSR_V
test_end

#endif

test_suite_end