author     rsandifo <rsandifo@138bc75d-0d04-0410-961f-82ee72b054a4>   2018-01-19 11:57:34 +0000
committer  rsandifo <rsandifo@138bc75d-0d04-0410-961f-82ee72b054a4>   2018-01-19 11:57:34 +0000
commit     4ca4c75db56ee3a4fc59afecec7f71f6deae968c (patch)
tree       c5cf9e658d84fb886f540d045d804e2242056a67
parent     83447a6fa1a4bc33c5c68a8378d14f24390bc272 (diff)
Avoid ICE for nested inductions (PR 83914)
This testcase ICEd because we converted the initial value of an
induction to the vector element type even for nested inductions.
This isn't necessary because the initial expression is vectorised
normally, and it meant that init_expr was no longer the original
statement operand by the time we called vect_get_vec_def_for_operand.
Also, adding the conversion code here made the existing SLP conversion
redundant.

2018-01-19  Richard Sandiford  <richard.sandiford@linaro.org>

gcc/
        PR tree-optimization/83914
        * tree-vect-loop.c (vectorizable_induction): Don't convert
        init_expr or apply the peeling adjustment for inductions
        that are nested within the vectorized loop.

gcc/testsuite/
        PR tree-optimization/83914
        * gcc.dg/vect/pr83914.c: New test.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@256884 138bc75d-0d04-0410-961f-82ee72b054a4
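For context, the loop nest that triggers the ICE is the one added as
gcc.dg/vect/pr83914.c further down. The annotated copy below is
editorial (the comments are not part of the committed test) and
reflects a reading of the description above: the "nested" induction is
one whose PHI sits in a loop nested inside the loop being vectorized.

/* Annotated copy of the new test (comments are editorial, not part of
   the committed file).  Compiled with -O3 this ICEd in
   vectorizable_induction before the fix.  */

struct s { struct s *ptrs[16]; } *a, *b;
int c;

void
foo (int n)
{
  for (; n; a = b, n--)        /* Loop considered for (outer-loop)
                                  vectorization.  */
    {
      b = a + 1;
      /* Inductions whose PHI sits in this inner loop are "nested
         inductions": their initial value is defined inside the loop
         being vectorized, so it is vectorized normally and must not be
         converted to the vector element type or adjusted for peeling
         in vectorizable_induction.  */
      for (c = 8; c; c--)
        a->ptrs[c] = b;
    }
}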
-rw-r--r--  gcc/ChangeLog                         7
-rw-r--r--  gcc/testsuite/ChangeLog               5
-rw-r--r--  gcc/testsuite/gcc.dg/vect/pr83914.c  15
-rw-r--r--  gcc/tree-vect-loop.c                 50
4 files changed, 50 insertions, 27 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 5feefb2dda8..a86ea914ca5 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,10 @@
+2018-01-19  Richard Sandiford  <richard.sandiford@linaro.org>
+
+        PR tree-optimization/83914
+        * tree-vect-loop.c (vectorizable_induction): Don't convert
+        init_expr or apply the peeling adjustment for inductions
+        that are nested within the vectorized loop.
+
 2018-01-19  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
 
         * config/arm/thumb2.md (*thumb2_negsi2_short): Use RSB mnemonic
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index ef525655391..44080e0b54a 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,8 @@
+2018-01-19  Richard Sandiford  <richard.sandiford@linaro.org>
+
+        PR tree-optimization/83914
+        * gcc.dg/vect/pr83914.c: New test.
+
 2018-01-19  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
 
         * gcc.target/arm/negdi-1.c: Remove bogus assembler scan for negs.
diff --git a/gcc/testsuite/gcc.dg/vect/pr83914.c b/gcc/testsuite/gcc.dg/vect/pr83914.c
new file mode 100644
index 00000000000..0bef7986bc2
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/pr83914.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-O3" } */
+
+struct s { struct s *ptrs[16]; } *a, *b;
+int c;
+void
+foo (int n)
+{
+  for (; n; a = b, n--)
+    {
+      b = a + 1;
+      for (c = 8; c; c--)
+        a->ptrs[c] = b;
+    }
+}
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index 79b818608df..8b2ecf84e3f 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -7678,28 +7678,33 @@ vectorizable_induction (gimple *phi,
   init_expr = PHI_ARG_DEF_FROM_EDGE (phi,
                                      loop_preheader_edge (iv_loop));
 
-  /* Convert the initial value and step to the desired type.  */
   stmts = NULL;
-  init_expr = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr);
-  step_expr = gimple_convert (&stmts, TREE_TYPE (vectype), step_expr);
-
-  /* If we are using the loop mask to "peel" for alignment then we need
-     to adjust the start value here.  */
-  tree skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
-  if (skip_niters != NULL_TREE)
+  if (!nested_in_vect_loop)
     {
-      if (FLOAT_TYPE_P (vectype))
-        skip_niters = gimple_build (&stmts, FLOAT_EXPR, TREE_TYPE (vectype),
-                                    skip_niters);
-      else
-        skip_niters = gimple_convert (&stmts, TREE_TYPE (vectype),
-                                      skip_niters);
-      tree skip_step = gimple_build (&stmts, MULT_EXPR, TREE_TYPE (vectype),
-                                     skip_niters, step_expr);
-      init_expr = gimple_build (&stmts, MINUS_EXPR, TREE_TYPE (vectype),
-                                init_expr, skip_step);
+      /* Convert the initial value to the desired type.  */
+      tree new_type = TREE_TYPE (vectype);
+      init_expr = gimple_convert (&stmts, new_type, init_expr);
+
+      /* If we are using the loop mask to "peel" for alignment then we need
+         to adjust the start value here.  */
+      tree skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
+      if (skip_niters != NULL_TREE)
+        {
+          if (FLOAT_TYPE_P (vectype))
+            skip_niters = gimple_build (&stmts, FLOAT_EXPR, new_type,
+                                        skip_niters);
+          else
+            skip_niters = gimple_convert (&stmts, new_type, skip_niters);
+          tree skip_step = gimple_build (&stmts, MULT_EXPR, new_type,
+                                         skip_niters, step_expr);
+          init_expr = gimple_build (&stmts, MINUS_EXPR, new_type,
+                                    init_expr, skip_step);
+        }
     }
 
+  /* Convert the step to the desired type.  */
+  step_expr = gimple_convert (&stmts, TREE_TYPE (vectype), step_expr);
+
   if (stmts)
     {
       new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
@@ -7718,15 +7723,6 @@ vectorizable_induction (gimple *phi,
       /* Enforced above.  */
       unsigned int const_nunits = nunits.to_constant ();
 
-      /* Convert the init to the desired type.  */
-      stmts = NULL;
-      init_expr = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr);
-      if (stmts)
-        {
-          new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
-          gcc_assert (!new_bb);
-        }
-
       /* Generate [VF*S, VF*S, ... ].  */
       if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
         {