Skip to content

Commit 47c4f54

Browse files
committed
[Hexagon] Omit calls to specialized {float,fix} routines
These were introduced in 1213a7a (Hexagon backend support, 2011-12-12) but they aren't present in libclang_rt.builtins-hexagon. The generic versions of these functions are present in the builtins, though. So it should suffice to call those instead.
1 parent 85601fd commit 47c4f54

File tree

2 files changed

+139
-7
lines changed

2 files changed

+139
-7
lines changed

llvm/lib/Target/Hexagon/HexagonISelLowering.cpp

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1860,13 +1860,6 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
18601860
setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3");
18611861
setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3");
18621862

1863-
setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
1864-
setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");
1865-
setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
1866-
setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");
1867-
setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
1868-
setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");
1869-
18701863
// This is the only fast library function for sqrtd.
18711864
if (FastMath)
18721865
setLibcallName(RTLIB::SQRT_F64, "__hexagon_fast2_sqrtdf2");
Lines changed: 139 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,139 @@
1+
; RUN: llc < %s -mtriple=hexagon-unknown-linux-musl \
2+
; RUN: | FileCheck %s -check-prefix=CHECK
3+
4+
define i64 @double_to_i128(double %d) nounwind strictfp {
5+
; CHECK-LABEL: double_to_i128:
6+
; CHECK: // %bb.0:
7+
; CHECK: call __fixdfti
8+
; CHECK: dealloc_return
9+
%1 = tail call i128 @llvm.experimental.constrained.fptosi.i128.f64(double %d, metadata !"fpexcept.strict")
10+
%2 = trunc i128 %1 to i64
11+
ret i64 %2
12+
}
13+
14+
define i64 @double_to_ui128(double %d) nounwind strictfp {
15+
; CHECK-LABEL: double_to_ui128:
16+
; CHECK: // %bb.0:
17+
; CHECK: call __fixunsdfti
18+
; CHECK: dealloc_return
19+
%1 = tail call i128 @llvm.experimental.constrained.fptoui.i128.f64(double %d, metadata !"fpexcept.strict")
20+
%2 = trunc i128 %1 to i64
21+
ret i64 %2
22+
}
23+
24+
define i64 @float_to_i128(float %d) nounwind strictfp {
25+
; CHECK-LABEL: float_to_i128:
26+
; CHECK: // %bb.0:
27+
; CHECK: call __fixsfti
28+
; CHECK: dealloc_return
29+
%1 = tail call i128 @llvm.experimental.constrained.fptosi.i128.f32(float %d, metadata !"fpexcept.strict")
30+
%2 = trunc i128 %1 to i64
31+
ret i64 %2
32+
}
33+
34+
define i64 @float_to_ui128(float %d) nounwind strictfp {
35+
; CHECK-LABEL: float_to_ui128:
36+
; CHECK: // %bb.0:
37+
; CHECK: call __fixunssfti
38+
; CHECK: dealloc_return
39+
%1 = tail call i128 @llvm.experimental.constrained.fptoui.i128.f32(float %d, metadata !"fpexcept.strict")
40+
%2 = trunc i128 %1 to i64
41+
ret i64 %2
42+
}
43+
44+
define i64 @longdouble_to_i128(ptr nocapture readonly %0) nounwind strictfp {
45+
; CHECK-LABEL: longdouble_to_i128:
46+
; CHECK: // %bb.0:
47+
; CHECK: call __fixxfti
48+
; CHECK: dealloc_return
49+
%2 = load x86_fp80, ptr %0, align 16
50+
%3 = tail call i128 @llvm.experimental.constrained.fptosi.i128.f80(x86_fp80 %2, metadata !"fpexcept.strict")
51+
%4 = trunc i128 %3 to i64
52+
ret i64 %4
53+
}
54+
55+
define i64 @longdouble_to_ui128(ptr nocapture readonly %0) nounwind strictfp {
56+
; CHECK-LABEL: longdouble_to_ui128:
57+
; CHECK: // %bb.0:
58+
; CHECK: call __fixunsxfti
59+
; CHECK: dealloc_return
60+
%2 = load x86_fp80, ptr %0, align 16
61+
%3 = tail call i128 @llvm.experimental.constrained.fptoui.i128.f80(x86_fp80 %2, metadata !"fpexcept.strict")
62+
%4 = trunc i128 %3 to i64
63+
ret i64 %4
64+
}
65+
66+
define double @i128_to_double(ptr nocapture readonly %0) nounwind strictfp {
67+
; CHECK-LABEL: i128_to_double:
68+
; CHECK: // %bb.0:
69+
; CHECK: call __floattidf
70+
; CHECK: dealloc_return
71+
%2 = load i128, ptr %0, align 16
72+
%3 = tail call double @llvm.experimental.constrained.sitofp.f64.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
73+
ret double %3
74+
}
75+
76+
define double @ui128_to_double(ptr nocapture readonly %0) nounwind strictfp {
77+
; CHECK-LABEL: ui128_to_double:
78+
; CHECK: // %bb.0:
79+
; CHECK: call __floatuntidf
80+
; CHECK: dealloc_return
81+
%2 = load i128, ptr %0, align 16
82+
%3 = tail call double @llvm.experimental.constrained.uitofp.f64.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
83+
ret double %3
84+
}
85+
86+
define float @i128_to_float(ptr nocapture readonly %0) nounwind strictfp {
87+
; CHECK-LABEL: i128_to_float:
88+
; CHECK: // %bb.0:
89+
; CHECK: call __floattisf
90+
; CHECK: dealloc_return
91+
%2 = load i128, ptr %0, align 16
92+
%3 = tail call float @llvm.experimental.constrained.sitofp.f32.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
93+
ret float %3
94+
}
95+
96+
define float @ui128_to_float(ptr nocapture readonly %0) nounwind strictfp {
97+
; CHECK-LABEL: ui128_to_float:
98+
; CHECK: // %bb.0:
99+
; CHECK: call __floatuntisf
100+
; CHECK: dealloc_return
101+
%2 = load i128, ptr %0, align 16
102+
%3 = tail call float @llvm.experimental.constrained.uitofp.f32.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
103+
ret float %3
104+
}
105+
106+
define void @i128_to_longdouble(ptr noalias nocapture sret(x86_fp80) align 16 %agg.result, ptr nocapture readonly %0) nounwind strictfp {
107+
; CHECK-LABEL: i128_to_longdouble:
108+
; CHECK: // %bb.0:
109+
; CHECK: call __floattixf
110+
; CHECK: dealloc_return
111+
%2 = load i128, ptr %0, align 16
112+
%3 = tail call x86_fp80 @llvm.experimental.constrained.sitofp.f80.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
113+
store x86_fp80 %3, ptr %agg.result, align 16
114+
ret void
115+
}
116+
117+
define void @ui128_to_longdouble(ptr noalias nocapture sret(x86_fp80) align 16 %agg.result, ptr nocapture readonly %0) nounwind strictfp {
118+
; CHECK-LABEL: ui128_to_longdouble:
119+
; CHECK: // %bb.0:
120+
; CHECK: call __floatuntixf
121+
; CHECK: dealloc_return
122+
%2 = load i128, ptr %0, align 16
123+
%3 = tail call x86_fp80 @llvm.experimental.constrained.uitofp.f80.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
124+
store x86_fp80 %3, ptr %agg.result, align 16
125+
ret void
126+
}
127+
128+
declare i128 @llvm.experimental.constrained.fptosi.i128.f64(double, metadata)
129+
declare i128 @llvm.experimental.constrained.fptoui.i128.f64(double, metadata)
130+
declare i128 @llvm.experimental.constrained.fptosi.i128.f32(float, metadata)
131+
declare i128 @llvm.experimental.constrained.fptoui.i128.f32(float, metadata)
132+
declare i128 @llvm.experimental.constrained.fptosi.i128.f80(x86_fp80, metadata)
133+
declare i128 @llvm.experimental.constrained.fptoui.i128.f80(x86_fp80, metadata)
134+
declare double @llvm.experimental.constrained.sitofp.f64.i128(i128, metadata, metadata)
135+
declare double @llvm.experimental.constrained.uitofp.f64.i128(i128, metadata, metadata)
136+
declare float @llvm.experimental.constrained.sitofp.f32.i128(i128, metadata, metadata)
137+
declare float @llvm.experimental.constrained.uitofp.f32.i128(i128, metadata, metadata)
138+
declare x86_fp80 @llvm.experimental.constrained.sitofp.f80.i128(i128, metadata, metadata)
139+
declare x86_fp80 @llvm.experimental.constrained.uitofp.f80.i128(i128, metadata, metadata)

0 commit comments

Comments (0)