|
 Auto generated patch. Do not edit or delete it, even if empty.
-diff -ruN --strip-trailing-cr a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
---- a/clang/lib/Sema/SemaDecl.cpp
-+++ b/clang/lib/Sema/SemaDecl.cpp
-@@ -4755,8 +4755,16 @@
-       return;
-     }
-   } else {
--    Diag(New->getLocation(), diag::warn_cxx_compat_tentative_definition) << New;
--    Diag(Old->getLocation(), diag::note_previous_declaration);
-+    // C++ may not have a tentative definition rule, but it has a different
-+    // rule about what constitutes a definition in the first place. See
-+    // [basic.def]p2 for details, but the basic idea is: if the old declaration
-+    // contains the extern specifier and doesn't have an initializer, it's fine
-+    // in C++.
-+    if (Old->getStorageClass() != SC_Extern || Old->hasInit()) {
-+      Diag(New->getLocation(), diag::warn_cxx_compat_tentative_definition)
-+          << New;
-+      Diag(Old->getLocation(), diag::note_previous_declaration);
+diff -ruN --strip-trailing-cr a/clang/include/clang/Sema/Overload.h b/clang/include/clang/Sema/Overload.h
+--- a/clang/include/clang/Sema/Overload.h
++++ b/clang/include/clang/Sema/Overload.h
+@@ -430,8 +430,15 @@
+       if (!ReferenceBinding) {
+ #ifndef NDEBUG
+         auto Decay = [&](QualType T) {
+-          return (T->isArrayType() || T->isFunctionType()) ? C.getDecayedType(T)
+-                                                            : T;
++          if (T->isArrayType() || T->isFunctionType())
++            T = C.getDecayedType(T);
++
++          // A function pointer type can be resolved to a member function type,
++          // which is still an identity conversion.
++          if (auto *N = T->getAs<MemberPointerType>();
++              N && N->isMemberFunctionPointer())
++            T = C.getDecayedType(N->getPointeeType());
++          return T;
+         };
+         // The types might differ if there is an array-to-pointer conversion
+         // an function-to-pointer conversion, or lvalue-to-rvalue conversion.
+diff -ruN --strip-trailing-cr a/clang/test/SemaCXX/overload-resolution-deferred-templates.cpp b/clang/test/SemaCXX/overload-resolution-deferred-templates.cpp
+--- a/clang/test/SemaCXX/overload-resolution-deferred-templates.cpp
++++ b/clang/test/SemaCXX/overload-resolution-deferred-templates.cpp
+@@ -232,3 +232,45 @@
+ };
+ 
+ InitListAreNotPerfectCpy InitListAreNotPerfectCpy_test({InitListAreNotPerfectCpy{}});
++
++namespace PointerToMemFunc {
++template <typename>
++class A;
++struct N {
++  template <typename T>
++  void f(T);
++};
++template <typename T>
++struct E {
++  template <class = A<int>>
++  void g() = delete;
++  void g(void (T::*)(char));
++};
++void f() {
++  E<N> e;
++  e.g(&N::f);
++}
++}
++
++#if __cplusplus >= 201402
++namespace PointerToMemData {
++struct N {
++  int field;
++};
++template <typename It, typename T>
++struct B {
++  B(It, T);
++  template <typename It2>
++  B(B<It2, T>);
++};
++template <typename T>
++struct C {
++  auto g() { return B<int, T>(0, T{}); }
++};
++void f() {
++  using T = decltype(C<decltype(&N::field)>{}.g());
++}
++
++}
++
++#endif
+diff -ruN --strip-trailing-cr a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
+--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
++++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
+@@ -5771,6 +5771,35 @@
+   return false;
+ }
+ 
++// Find an insert position of Addr for MemoryInst. We can't guarantee MemoryInst
++// is the first instruction that will use Addr. So we need to find the first
++// user of Addr in current BB.
++static BasicBlock::iterator findInsertPos(Value *Addr, Instruction *MemoryInst,
++                                          Value *SunkAddr) {
++  if (Addr->hasOneUse())
++    return MemoryInst->getIterator();
++
++  // We already have a SunkAddr in current BB, but we may need to insert cast
++  // instruction after it.
++  if (SunkAddr) {
++    if (Instruction *AddrInst = dyn_cast<Instruction>(SunkAddr))
++      return std::next(AddrInst->getIterator());
++  }
++
++  // Find the first user of Addr in current BB.
++  Instruction *Earliest = MemoryInst;
++  for (User *U : Addr->users()) {
++    Instruction *UserInst = dyn_cast<Instruction>(U);
++    if (UserInst && UserInst->getParent() == MemoryInst->getParent()) {
++      if (isa<PHINode>(UserInst) || UserInst->isDebugOrPseudoInst())
++        continue;
++      if (UserInst->comesBefore(Earliest))
++        Earliest = UserInst;
 +    }
++  }
++  return Earliest->getIterator();
++}
++
+ /// Sink addressing mode computation immediate before MemoryInst if doing so
+ /// can be done without increasing register pressure.  The need for the
+ /// register pressure constraint means this can end up being an all or nothing
+@@ -5895,11 +5924,6 @@
+     return Modified;
    }
 
-   if (haveIncompatibleLanguageLinkages(Old, New)) {
-diff -ruN --strip-trailing-cr a/clang/test/Sema/warn-tentative-defn-compat.c b/clang/test/Sema/warn-tentative-defn-compat.c
---- a/clang/test/Sema/warn-tentative-defn-compat.c
-+++ b/clang/test/Sema/warn-tentative-defn-compat.c
-@@ -20,4 +20,7 @@
-            cxx-error {{redefinition of 'k'}}
+-  // Insert this computation right after this user.  Since our caller is
+-  // scanning from the top of the BB to the bottom, reuse of the expr are
+-  // guaranteed to happen later.
+-  IRBuilder<> Builder(MemoryInst);
+-
+   // Now that we determined the addressing expression we want to use and know
+   // that we have to sink it into this block.  Check to see if we have already
+   // done this for some other load/store instr in this block.  If so, reuse
+@@ -5910,6 +5934,13 @@
+ 
+   Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
+   Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
++
++  // The current BB may be optimized multiple times, we can't guarantee the
++  // reuse of Addr happens later, call findInsertPos to find an appropriate
++  // insert position.
++  IRBuilder<> Builder(MemoryInst->getParent(),
++                      findInsertPos(Addr, MemoryInst, SunkAddr));
++
+   if (SunkAddr) {
+     LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode
+                       << " for " << *MemoryInst << "\n");
+diff -ruN --strip-trailing-cr a/llvm/test/Transforms/CodeGenPrepare/X86/sink-addr-reuse.ll b/llvm/test/Transforms/CodeGenPrepare/X86/sink-addr-reuse.ll
+--- a/llvm/test/Transforms/CodeGenPrepare/X86/sink-addr-reuse.ll
++++ b/llvm/test/Transforms/CodeGenPrepare/X86/sink-addr-reuse.ll
+@@ -0,0 +1,44 @@
++; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
++; RUN: opt -S -p 'require<profile-summary>,codegenprepare' -cgpp-huge-func=0 < %s | FileCheck %s
++
++target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
++target triple = "x86_64-grtev4-linux-gnu"
++
++declare void @g(ptr)
++
++; %load and %load5 use the same address, %load5 is optimized first, %load is
++; optimized later and reuse the same address computation instruction. We must
++; make sure not to generate use before def error.
++
++define void @f(ptr %arg) {
++; CHECK-LABEL: define void @f(
++; CHECK-SAME: ptr [[ARG:%.*]]) {
++; CHECK-NEXT:  [[BB:.*:]]
++; CHECK-NEXT:    [[GETELEMENTPTR:%.*]] = getelementptr i8, ptr [[ARG]], i64 -64
++; CHECK-NEXT:    call void @g(ptr [[GETELEMENTPTR]])
++; CHECK-NEXT:    [[SUNKADDR1:%.*]] = getelementptr i8, ptr [[ARG]], i64 -64
++; CHECK-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[SUNKADDR1]], align 8
++; CHECK-NEXT:    [[SUNKADDR:%.*]] = getelementptr i8, ptr [[ARG]], i64 -56
++; CHECK-NEXT:    [[LOAD4:%.*]] = load i32, ptr [[SUNKADDR]], align 8
++; CHECK-NEXT:    [[LOAD5:%.*]] = load ptr, ptr [[SUNKADDR1]], align 8
++; CHECK-NEXT:    [[TMP0:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 1, i32 0)
++; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i32, i1 } [[TMP0]], 0
++; CHECK-NEXT:    ret void
++;
++bb:
++  %getelementptr = getelementptr i8, ptr %arg, i64 -64
++  %getelementptr1 = getelementptr i8, ptr %arg, i64 -56
++  call void @g(ptr %getelementptr)
++  br label %bb3
++
++bb3:
++  %load = load ptr, ptr %getelementptr, align 8
++  %load4 = load i32, ptr %getelementptr1, align 8
++  %load5 = load ptr, ptr %getelementptr, align 8
++  %add = add i32 1, 0
++  %icmp = icmp eq i32 %add, 0
++  br i1 %icmp, label %bb7, label %bb7
++
++bb7:
++  ret void
++}
+diff -ruN --strip-trailing-cr a/mlir/include/mlir/Query/Matcher/SliceMatchers.h b/mlir/include/mlir/Query/Matcher/SliceMatchers.h
+--- a/mlir/include/mlir/Query/Matcher/SliceMatchers.h
++++ b/mlir/include/mlir/Query/Matcher/SliceMatchers.h
+@@ -14,6 +14,7 @@
+ #define MLIR_TOOLS_MLIRQUERY_MATCHERS_SLICEMATCHERS_H
+ 
+ #include "mlir/Analysis/SliceAnalysis.h"
++#include "mlir/IR/Operation.h"
 
- // Cannot have two declarations with initializers, that is a redefinition in
--// both C and C++.
-+// both C and C++. However, C++ does have a different definition of what makes
-+// a declaration a definition.
-+extern const int a;
-+const int a = 12; // Okay in C and C++
-diff -ruN --strip-trailing-cr a/mlir/lib/TableGen/Pattern.cpp b/mlir/lib/TableGen/Pattern.cpp
---- a/mlir/lib/TableGen/Pattern.cpp
-+++ b/mlir/lib/TableGen/Pattern.cpp
-@@ -304,8 +304,8 @@
-   assert(index < 0);
-   auto *operand = cast<NamedTypeConstraint *>(op->getArg(getArgIndex()));
-   if (operand->isOptional()) {
--    auto repl =
--        formatv(fmt, formatv("({0}.empty() ? Value() : *{0}.begin())", name));
-+    auto repl = formatv(
-+        fmt, formatv("({0}.empty() ? ::mlir::Value() : *{0}.begin())", name));
-     LLVM_DEBUG(dbgs() << repl << " (OptionalOperand)\n");
-     return std::string(repl);
-   }
+ /// A matcher encapsulating `getBackwardSlice` method from SliceAnalysis.h.
+ /// Additionally, it limits the slice computation to a certain depth level using
+diff -ruN --strip-trailing-cr a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+--- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
++++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+@@ -12874,6 +12874,7 @@
+     ),
+     includes = ["include"],
+     deps = [
++        ":Analysis",
+         ":FuncDialect",
+         ":IR",
+         ":Reducer",