diff mbox series

[20/47] llvm: upgrade 18.1.8 -> 19.1.0

Message ID 1727246960-20665-20-git-send-email-wangmy@fujitsu.com
State New
Headers show
Series [01/47] adwaita-icon-theme: upgrade 46.2 -> 47.0 | expand

Commit Message

Mingyu Wang (Fujitsu) Sept. 25, 2024, 6:48 a.m. UTC
From: Wang Mingyu <wangmy@fujitsu.com>

Changelog:
 https://releases.llvm.org/19.1.0/docs/ReleaseNotes.html

0002-llvm-Fix-CVE-2024-0151.patch
removed, since the fix for CVE-2024-0151 is already included upstream in 19.1.0

Signed-off-by: Wang Mingyu <wangmy@fujitsu.com>
---
 .../llvm/0002-llvm-Fix-CVE-2024-0151.patch    | 1086 -----------------
 .../llvm/{llvm_18.1.8.bb => llvm_19.1.0.bb}   |    3 +-
 2 files changed, 1 insertion(+), 1088 deletions(-)
 delete mode 100644 meta/recipes-devtools/llvm/llvm/0002-llvm-Fix-CVE-2024-0151.patch
 rename meta/recipes-devtools/llvm/{llvm_18.1.8.bb => llvm_19.1.0.bb} (97%)
diff mbox series

Patch

diff --git a/meta/recipes-devtools/llvm/llvm/0002-llvm-Fix-CVE-2024-0151.patch b/meta/recipes-devtools/llvm/llvm/0002-llvm-Fix-CVE-2024-0151.patch
deleted file mode 100644
index c05685e64d..0000000000
--- a/meta/recipes-devtools/llvm/llvm/0002-llvm-Fix-CVE-2024-0151.patch
+++ /dev/null
@@ -1,1086 +0,0 @@ 
-commit 78ff617d3f573fb3a9b2fef180fa0fd43d5584ea
-Author: Lucas Duarte Prates <lucas.prates@arm.com>
-Date:   Thu Jun 20 10:22:01 2024 +0100
-
-    [ARM] CMSE security mitigation on function arguments and returned values (#89944)
-
-    The ABI mandates two things related to function calls:
-     - Function arguments must be sign- or zero-extended to the register
-       size by the caller.
-     - Return values must be sign- or zero-extended to the register size by
-       the callee.
-
-    As consequence, callees can assume that function arguments have been
-    extended and so can callers with regards to return values.
-
-    Here lies the problem: Nonsecure code might deliberately ignore this
-    mandate with the intent of attempting an exploit. It might try to pass
-    values that lie outside the expected type's value range in order to
-    trigger undefined behaviour, e.g. out of bounds access.
-
-    With the mitigation implemented, Secure code always performs extension
-    of values passed by Nonsecure code.
-
-    This addresses the vulnerability described in CVE-2024-0151.
-
-    Patches by Victor Campos.
-
-    ---------
-
-    Co-authored-by: Victor Campos <victor.campos@arm.com>
-
-Upstream-Status: Backport [https://github.com/llvm/llvm-project/commit/78ff617d3f573fb3a9b2fef180fa0fd43d5584ea]
-CVE: CVE-2024-0151
-Signed-off-by: Deepthi Hemraj <Deepthi.Hemraj@windriver.com>
----
-diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
-index bfe137b95602..5490c3c9df6c 100644
---- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
-+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
-@@ -156,6 +156,17 @@ static const MCPhysReg GPRArgRegs[] = {
-   ARM::R0, ARM::R1, ARM::R2, ARM::R3
- };
- 
-+static SDValue handleCMSEValue(const SDValue &Value, const ISD::InputArg &Arg,
-+                               SelectionDAG &DAG, const SDLoc &DL) {
-+  assert(Arg.ArgVT.isScalarInteger());
-+  assert(Arg.ArgVT.bitsLT(MVT::i32));
-+  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, Arg.ArgVT, Value);
-+  SDValue Ext =
-+      DAG.getNode(Arg.Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL,
-+                  MVT::i32, Trunc);
-+  return Ext;
-+}
-+
- void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT) {
-   if (VT != PromotedLdStVT) {
-     setOperationAction(ISD::LOAD, VT, Promote);
-@@ -2196,7 +2207,7 @@ SDValue ARMTargetLowering::LowerCallResult(
-     SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg,
-     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
-     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
--    SDValue ThisVal) const {
-+    SDValue ThisVal, bool isCmseNSCall) const {
-   // Assign locations to each value returned by this call.
-   SmallVector<CCValAssign, 16> RVLocs;
-   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
-@@ -2274,6 +2285,15 @@ SDValue ARMTargetLowering::LowerCallResult(
-         (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
-       Val = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Val);
- 
-+    // On CMSE Non-secure Calls, call results (returned values) whose bitwidth
-+    // is less than 32 bits must be sign- or zero-extended after the call for
-+    // security reasons. Although the ABI mandates an extension done by the
-+    // callee, the latter cannot be trusted to follow the rules of the ABI.
-+    const ISD::InputArg &Arg = Ins[VA.getValNo()];
-+    if (isCmseNSCall && Arg.ArgVT.isScalarInteger() &&
-+        VA.getLocVT().isScalarInteger() && Arg.ArgVT.bitsLT(MVT::i32))
-+      Val = handleCMSEValue(Val, Arg, DAG, dl);
-+
-     InVals.push_back(Val);
-   }
- 
-@@ -2888,7 +2908,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
-   // return.
-   return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, dl, DAG,
-                          InVals, isThisReturn,
--                         isThisReturn ? OutVals[0] : SDValue());
-+                         isThisReturn ? OutVals[0] : SDValue(), isCmseNSCall);
- }
- 
- /// HandleByVal - Every parameter *after* a byval parameter is passed
-@@ -4485,8 +4505,6 @@ SDValue ARMTargetLowering::LowerFormalArguments(
-                  *DAG.getContext());
-   CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg));
- 
--  SmallVector<SDValue, 16> ArgValues;
--  SDValue ArgValue;
-   Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin();
-   unsigned CurArgIdx = 0;
- 
-@@ -4541,6 +4559,7 @@ SDValue ARMTargetLowering::LowerFormalArguments(
-     // Arguments stored in registers.
-     if (VA.isRegLoc()) {
-       EVT RegVT = VA.getLocVT();
-+      SDValue ArgValue;
- 
-       if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) {
-         // f64 and vector types are split up into multiple registers or
-@@ -4604,16 +4623,6 @@ SDValue ARMTargetLowering::LowerFormalArguments(
-       case CCValAssign::BCvt:
-         ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
-         break;
--      case CCValAssign::SExt:
--        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
--                               DAG.getValueType(VA.getValVT()));
--        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
--        break;
--      case CCValAssign::ZExt:
--        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
--                               DAG.getValueType(VA.getValVT()));
--        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
--        break;
-       }
- 
-       // f16 arguments have their size extended to 4 bytes and passed as if they
-@@ -4623,6 +4632,15 @@ SDValue ARMTargetLowering::LowerFormalArguments(
-           (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
-         ArgValue = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), ArgValue);
- 
-+      // On CMSE Entry Functions, formal integer arguments whose bitwidth is
-+      // less than 32 bits must be sign- or zero-extended in the callee for
-+      // security reasons. Although the ABI mandates an extension done by the
-+      // caller, the latter cannot be trusted to follow the rules of the ABI.
-+      const ISD::InputArg &Arg = Ins[VA.getValNo()];
-+      if (AFI->isCmseNSEntryFunction() && Arg.ArgVT.isScalarInteger() &&
-+          RegVT.isScalarInteger() && Arg.ArgVT.bitsLT(MVT::i32))
-+        ArgValue = handleCMSEValue(ArgValue, Arg, DAG, dl);
-+
-       InVals.push_back(ArgValue);
-     } else { // VA.isRegLoc()
-       // Only arguments passed on the stack should make it here.
-diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
-index 62a52bdb03f7..a255e9b6fc36 100644
---- a/llvm/lib/Target/ARM/ARMISelLowering.h
-+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
-@@ -891,7 +891,7 @@ class VectorType;
-                             const SmallVectorImpl<ISD::InputArg> &Ins,
-                             const SDLoc &dl, SelectionDAG &DAG,
-                             SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
--                            SDValue ThisVal) const;
-+                            SDValue ThisVal, bool isCmseNSCall) const;
- 
-     bool supportSplitCSR(MachineFunction *MF) const override {
-       return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
-diff --git a/llvm/test/CodeGen/ARM/cmse-harden-call-returned-values.ll b/llvm/test/CodeGen/ARM/cmse-harden-call-returned-values.ll
-new file mode 100644
-index 0000000000..58eef443c25e
---- /dev/null
-+++ b/llvm/test/CodeGen/ARM/cmse-harden-call-returned-values.ll
-@@ -0,0 +1,552 @@
-+; RUN: llc %s -mtriple=thumbv8m.main     -o - | FileCheck %s --check-prefixes V8M-COMMON,V8M-LE
-+; RUN: llc %s -mtriple=thumbebv8m.main   -o - | FileCheck %s --check-prefixes V8M-COMMON,V8M-BE
-+; RUN: llc %s -mtriple=thumbv8.1m.main   -o - | FileCheck %s --check-prefixes V81M-COMMON,V81M-LE
-+; RUN: llc %s -mtriple=thumbebv8.1m.main -o - | FileCheck %s --check-prefixes V81M-COMMON,V81M-BE
-+
-+@get_idx = hidden local_unnamed_addr global ptr null, align 4
-+@arr = hidden local_unnamed_addr global [256 x i32] zeroinitializer, align 4
-+
-+define i32 @access_i16() {
-+; V8M-COMMON-LABEL: access_i16:
-+; V8M-COMMON:       @ %bb.0: @ %entry
-+; V8M-COMMON-NEXT:    push {r7, lr}
-+; V8M-COMMON-NEXT:    movw r0, :lower16:get_idx
-+; V8M-COMMON-NEXT:    movt r0, :upper16:get_idx
-+; V8M-COMMON-NEXT:    ldr r0, [r0]
-+; V8M-COMMON-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V8M-COMMON-NEXT:    bic r0, r0, #1
-+; V8M-COMMON-NEXT:    sub sp, #136
-+; V8M-COMMON-NEXT:    vlstm sp, {d0 - d15}
-+; V8M-COMMON-NEXT:    mov r1, r0
-+; V8M-COMMON-NEXT:    mov r2, r0
-+; V8M-COMMON-NEXT:    mov r3, r0
-+; V8M-COMMON-NEXT:    mov r4, r0
-+; V8M-COMMON-NEXT:    mov r5, r0
-+; V8M-COMMON-NEXT:    mov r6, r0
-+; V8M-COMMON-NEXT:    mov r7, r0
-+; V8M-COMMON-NEXT:    mov r8, r0
-+; V8M-COMMON-NEXT:    mov r9, r0
-+; V8M-COMMON-NEXT:    mov r10, r0
-+; V8M-COMMON-NEXT:    mov r11, r0
-+; V8M-COMMON-NEXT:    mov r12, r0
-+; V8M-COMMON-NEXT:    msr apsr_nzcvq, r0
-+; V8M-COMMON-NEXT:    blxns r0
-+; V8M-COMMON-NEXT:    vlldm sp, {d0 - d15}
-+; V8M-COMMON-NEXT:    add sp, #136
-+; V8M-COMMON-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V8M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V8M-COMMON-NEXT:    sxth r0, r0
-+; V8M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V8M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V8M-COMMON-NEXT:    pop {r7, pc}
-+;
-+; V81M-COMMON-LABEL: access_i16:
-+; V81M-COMMON:       @ %bb.0: @ %entry
-+; V81M-COMMON-NEXT:    push {r7, lr}
-+; V81M-COMMON-NEXT:    movw r0, :lower16:get_idx
-+; V81M-COMMON-NEXT:    movt r0, :upper16:get_idx
-+; V81M-COMMON-NEXT:    ldr r0, [r0]
-+; V81M-COMMON-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V81M-COMMON-NEXT:    bic r0, r0, #1
-+; V81M-COMMON-NEXT:    sub sp, #136
-+; V81M-COMMON-NEXT:    vlstm sp, {d0 - d15}
-+; V81M-COMMON-NEXT:    clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
-+; V81M-COMMON-NEXT:    blxns r0
-+; V81M-COMMON-NEXT:    vlldm sp, {d0 - d15}
-+; V81M-COMMON-NEXT:    add sp, #136
-+; V81M-COMMON-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V81M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V81M-COMMON-NEXT:    sxth r0, r0
-+; V81M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V81M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V81M-COMMON-NEXT:    pop {r7, pc}
-+entry:
-+  %0 = load ptr, ptr @get_idx, align 4
-+  %call = tail call signext i16 %0() "cmse_nonsecure_call"
-+  %idxprom = sext i16 %call to i32
-+  %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
-+  %1 = load i32, ptr %arrayidx, align 4
-+  ret i32 %1
-+}
-+
-+define i32 @access_u16() {
-+; V8M-COMMON-LABEL: access_u16:
-+; V8M-COMMON:       @ %bb.0: @ %entry
-+; V8M-COMMON-NEXT:    push {r7, lr}
-+; V8M-COMMON-NEXT:    movw r0, :lower16:get_idx
-+; V8M-COMMON-NEXT:    movt r0, :upper16:get_idx
-+; V8M-COMMON-NEXT:    ldr r0, [r0]
-+; V8M-COMMON-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V8M-COMMON-NEXT:    bic r0, r0, #1
-+; V8M-COMMON-NEXT:    sub sp, #136
-+; V8M-COMMON-NEXT:    vlstm sp, {d0 - d15}
-+; V8M-COMMON-NEXT:    mov r1, r0
-+; V8M-COMMON-NEXT:    mov r2, r0
-+; V8M-COMMON-NEXT:    mov r3, r0
-+; V8M-COMMON-NEXT:    mov r4, r0
-+; V8M-COMMON-NEXT:    mov r5, r0
-+; V8M-COMMON-NEXT:    mov r6, r0
-+; V8M-COMMON-NEXT:    mov r7, r0
-+; V8M-COMMON-NEXT:    mov r8, r0
-+; V8M-COMMON-NEXT:    mov r9, r0
-+; V8M-COMMON-NEXT:    mov r10, r0
-+; V8M-COMMON-NEXT:    mov r11, r0
-+; V8M-COMMON-NEXT:    mov r12, r0
-+; V8M-COMMON-NEXT:    msr apsr_nzcvq, r0
-+; V8M-COMMON-NEXT:    blxns r0
-+; V8M-COMMON-NEXT:    vlldm sp, {d0 - d15}
-+; V8M-COMMON-NEXT:    add sp, #136
-+; V8M-COMMON-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V8M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V8M-COMMON-NEXT:    uxth r0, r0
-+; V8M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V8M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V8M-COMMON-NEXT:    pop {r7, pc}
-+;
-+; V81M-COMMON-LABEL: access_u16:
-+; V81M-COMMON:       @ %bb.0: @ %entry
-+; V81M-COMMON-NEXT:    push {r7, lr}
-+; V81M-COMMON-NEXT:    movw r0, :lower16:get_idx
-+; V81M-COMMON-NEXT:    movt r0, :upper16:get_idx
-+; V81M-COMMON-NEXT:    ldr r0, [r0]
-+; V81M-COMMON-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V81M-COMMON-NEXT:    bic r0, r0, #1
-+; V81M-COMMON-NEXT:    sub sp, #136
-+; V81M-COMMON-NEXT:    vlstm sp, {d0 - d15}
-+; V81M-COMMON-NEXT:    clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
-+; V81M-COMMON-NEXT:    blxns r0
-+; V81M-COMMON-NEXT:    vlldm sp, {d0 - d15}
-+; V81M-COMMON-NEXT:    add sp, #136
-+; V81M-COMMON-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V81M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V81M-COMMON-NEXT:    uxth r0, r0
-+; V81M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V81M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V81M-COMMON-NEXT:    pop {r7, pc}
-+entry:
-+  %0 = load ptr, ptr @get_idx, align 4
-+  %call = tail call zeroext i16 %0() "cmse_nonsecure_call"
-+  %idxprom = zext i16 %call to i32
-+  %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
-+  %1 = load i32, ptr %arrayidx, align 4
-+  ret i32 %1
-+}
-+
-+define i32 @access_i8() {
-+; V8M-COMMON-LABEL: access_i8:
-+; V8M-COMMON:       @ %bb.0: @ %entry
-+; V8M-COMMON-NEXT:    push {r7, lr}
-+; V8M-COMMON-NEXT:    movw r0, :lower16:get_idx
-+; V8M-COMMON-NEXT:    movt r0, :upper16:get_idx
-+; V8M-COMMON-NEXT:    ldr r0, [r0]
-+; V8M-COMMON-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V8M-COMMON-NEXT:    bic r0, r0, #1
-+; V8M-COMMON-NEXT:    sub sp, #136
-+; V8M-COMMON-NEXT:    vlstm sp, {d0 - d15}
-+; V8M-COMMON-NEXT:    mov r1, r0
-+; V8M-COMMON-NEXT:    mov r2, r0
-+; V8M-COMMON-NEXT:    mov r3, r0
-+; V8M-COMMON-NEXT:    mov r4, r0
-+; V8M-COMMON-NEXT:    mov r5, r0
-+; V8M-COMMON-NEXT:    mov r6, r0
-+; V8M-COMMON-NEXT:    mov r7, r0
-+; V8M-COMMON-NEXT:    mov r8, r0
-+; V8M-COMMON-NEXT:    mov r9, r0
-+; V8M-COMMON-NEXT:    mov r10, r0
-+; V8M-COMMON-NEXT:    mov r11, r0
-+; V8M-COMMON-NEXT:    mov r12, r0
-+; V8M-COMMON-NEXT:    msr apsr_nzcvq, r0
-+; V8M-COMMON-NEXT:    blxns r0
-+; V8M-COMMON-NEXT:    vlldm sp, {d0 - d15}
-+; V8M-COMMON-NEXT:    add sp, #136
-+; V8M-COMMON-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V8M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V8M-COMMON-NEXT:    sxtb r0, r0
-+; V8M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V8M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V8M-COMMON-NEXT:    pop {r7, pc}
-+;
-+; V81M-COMMON-LABEL: access_i8:
-+; V81M-COMMON:       @ %bb.0: @ %entry
-+; V81M-COMMON-NEXT:    push {r7, lr}
-+; V81M-COMMON-NEXT:    movw r0, :lower16:get_idx
-+; V81M-COMMON-NEXT:    movt r0, :upper16:get_idx
-+; V81M-COMMON-NEXT:    ldr r0, [r0]
-+; V81M-COMMON-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V81M-COMMON-NEXT:    bic r0, r0, #1
-+; V81M-COMMON-NEXT:    sub sp, #136
-+; V81M-COMMON-NEXT:    vlstm sp, {d0 - d15}
-+; V81M-COMMON-NEXT:    clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
-+; V81M-COMMON-NEXT:    blxns r0
-+; V81M-COMMON-NEXT:    vlldm sp, {d0 - d15}
-+; V81M-COMMON-NEXT:    add sp, #136
-+; V81M-COMMON-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V81M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V81M-COMMON-NEXT:    sxtb r0, r0
-+; V81M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V81M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V81M-COMMON-NEXT:    pop {r7, pc}
-+entry:
-+  %0 = load ptr, ptr @get_idx, align 4
-+  %call = tail call signext i8 %0() "cmse_nonsecure_call"
-+  %idxprom = sext i8 %call to i32
-+  %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
-+  %1 = load i32, ptr %arrayidx, align 4
-+  ret i32 %1
-+}
-+
-+define i32 @access_u8() {
-+; V8M-COMMON-LABEL: access_u8:
-+; V8M-COMMON:       @ %bb.0: @ %entry
-+; V8M-COMMON-NEXT:    push {r7, lr}
-+; V8M-COMMON-NEXT:    movw r0, :lower16:get_idx
-+; V8M-COMMON-NEXT:    movt r0, :upper16:get_idx
-+; V8M-COMMON-NEXT:    ldr r0, [r0]
-+; V8M-COMMON-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V8M-COMMON-NEXT:    bic r0, r0, #1
-+; V8M-COMMON-NEXT:    sub sp, #136
-+; V8M-COMMON-NEXT:    vlstm sp, {d0 - d15}
-+; V8M-COMMON-NEXT:    mov r1, r0
-+; V8M-COMMON-NEXT:    mov r2, r0
-+; V8M-COMMON-NEXT:    mov r3, r0
-+; V8M-COMMON-NEXT:    mov r4, r0
-+; V8M-COMMON-NEXT:    mov r5, r0
-+; V8M-COMMON-NEXT:    mov r6, r0
-+; V8M-COMMON-NEXT:    mov r7, r0
-+; V8M-COMMON-NEXT:    mov r8, r0
-+; V8M-COMMON-NEXT:    mov r9, r0
-+; V8M-COMMON-NEXT:    mov r10, r0
-+; V8M-COMMON-NEXT:    mov r11, r0
-+; V8M-COMMON-NEXT:    mov r12, r0
-+; V8M-COMMON-NEXT:    msr apsr_nzcvq, r0
-+; V8M-COMMON-NEXT:    blxns r0
-+; V8M-COMMON-NEXT:    vlldm sp, {d0 - d15}
-+; V8M-COMMON-NEXT:    add sp, #136
-+; V8M-COMMON-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V8M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V8M-COMMON-NEXT:    uxtb r0, r0
-+; V8M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V8M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V8M-COMMON-NEXT:    pop {r7, pc}
-+;
-+; V81M-COMMON-LABEL: access_u8:
-+; V81M-COMMON:       @ %bb.0: @ %entry
-+; V81M-COMMON-NEXT:    push {r7, lr}
-+; V81M-COMMON-NEXT:    movw r0, :lower16:get_idx
-+; V81M-COMMON-NEXT:    movt r0, :upper16:get_idx
-+; V81M-COMMON-NEXT:    ldr r0, [r0]
-+; V81M-COMMON-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V81M-COMMON-NEXT:    bic r0, r0, #1
-+; V81M-COMMON-NEXT:    sub sp, #136
-+; V81M-COMMON-NEXT:    vlstm sp, {d0 - d15}
-+; V81M-COMMON-NEXT:    clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
-+; V81M-COMMON-NEXT:    blxns r0
-+; V81M-COMMON-NEXT:    vlldm sp, {d0 - d15}
-+; V81M-COMMON-NEXT:    add sp, #136
-+; V81M-COMMON-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V81M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V81M-COMMON-NEXT:    uxtb r0, r0
-+; V81M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V81M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V81M-COMMON-NEXT:    pop {r7, pc}
-+entry:
-+  %0 = load ptr, ptr @get_idx, align 4
-+  %call = tail call zeroext i8 %0() "cmse_nonsecure_call"
-+  %idxprom = zext i8 %call to i32
-+  %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
-+  %1 = load i32, ptr %arrayidx, align 4
-+  ret i32 %1
-+}
-+
-+define i32 @access_i1() {
-+; V8M-COMMON-LABEL: access_i1:
-+; V8M-COMMON:       @ %bb.0: @ %entry
-+; V8M-COMMON-NEXT:    push {r7, lr}
-+; V8M-COMMON-NEXT:    movw r0, :lower16:get_idx
-+; V8M-COMMON-NEXT:    movt r0, :upper16:get_idx
-+; V8M-COMMON-NEXT:    ldr r0, [r0]
-+; V8M-COMMON-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V8M-COMMON-NEXT:    bic r0, r0, #1
-+; V8M-COMMON-NEXT:    sub sp, #136
-+; V8M-COMMON-NEXT:    vlstm sp, {d0 - d15}
-+; V8M-COMMON-NEXT:    mov r1, r0
-+; V8M-COMMON-NEXT:    mov r2, r0
-+; V8M-COMMON-NEXT:    mov r3, r0
-+; V8M-COMMON-NEXT:    mov r4, r0
-+; V8M-COMMON-NEXT:    mov r5, r0
-+; V8M-COMMON-NEXT:    mov r6, r0
-+; V8M-COMMON-NEXT:    mov r7, r0
-+; V8M-COMMON-NEXT:    mov r8, r0
-+; V8M-COMMON-NEXT:    mov r9, r0
-+; V8M-COMMON-NEXT:    mov r10, r0
-+; V8M-COMMON-NEXT:    mov r11, r0
-+; V8M-COMMON-NEXT:    mov r12, r0
-+; V8M-COMMON-NEXT:    msr apsr_nzcvq, r0
-+; V8M-COMMON-NEXT:    blxns r0
-+; V8M-COMMON-NEXT:    vlldm sp, {d0 - d15}
-+; V8M-COMMON-NEXT:    add sp, #136
-+; V8M-COMMON-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V8M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V8M-COMMON-NEXT:    and r0, r0, #1
-+; V8M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V8M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V8M-COMMON-NEXT:    pop {r7, pc}
-+;
-+; V81M-COMMON-LABEL: access_i1:
-+; V81M-COMMON:       @ %bb.0: @ %entry
-+; V81M-COMMON-NEXT:    push {r7, lr}
-+; V81M-COMMON-NEXT:    movw r0, :lower16:get_idx
-+; V81M-COMMON-NEXT:    movt r0, :upper16:get_idx
-+; V81M-COMMON-NEXT:    ldr r0, [r0]
-+; V81M-COMMON-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V81M-COMMON-NEXT:    bic r0, r0, #1
-+; V81M-COMMON-NEXT:    sub sp, #136
-+; V81M-COMMON-NEXT:    vlstm sp, {d0 - d15}
-+; V81M-COMMON-NEXT:    clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
-+; V81M-COMMON-NEXT:    blxns r0
-+; V81M-COMMON-NEXT:    vlldm sp, {d0 - d15}
-+; V81M-COMMON-NEXT:    add sp, #136
-+; V81M-COMMON-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V81M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V81M-COMMON-NEXT:    and r0, r0, #1
-+; V81M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V81M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V81M-COMMON-NEXT:    pop {r7, pc}
-+entry:
-+  %0 = load ptr, ptr @get_idx, align 4
-+  %call = tail call zeroext i1 %0() "cmse_nonsecure_call"
-+  %idxprom = zext i1 %call to i32
-+  %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
-+  %1 = load i32, ptr %arrayidx, align 4
-+  ret i32 %1
-+}
-+
-+define i32 @access_i5() {
-+; V8M-COMMON-LABEL: access_i5:
-+; V8M-COMMON:       @ %bb.0: @ %entry
-+; V8M-COMMON-NEXT:    push {r7, lr}
-+; V8M-COMMON-NEXT:    movw r0, :lower16:get_idx
-+; V8M-COMMON-NEXT:    movt r0, :upper16:get_idx
-+; V8M-COMMON-NEXT:    ldr r0, [r0]
-+; V8M-COMMON-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V8M-COMMON-NEXT:    bic r0, r0, #1
-+; V8M-COMMON-NEXT:    sub sp, #136
-+; V8M-COMMON-NEXT:    vlstm sp, {d0 - d15}
-+; V8M-COMMON-NEXT:    mov r1, r0
-+; V8M-COMMON-NEXT:    mov r2, r0
-+; V8M-COMMON-NEXT:    mov r3, r0
-+; V8M-COMMON-NEXT:    mov r4, r0
-+; V8M-COMMON-NEXT:    mov r5, r0
-+; V8M-COMMON-NEXT:    mov r6, r0
-+; V8M-COMMON-NEXT:    mov r7, r0
-+; V8M-COMMON-NEXT:    mov r8, r0
-+; V8M-COMMON-NEXT:    mov r9, r0
-+; V8M-COMMON-NEXT:    mov r10, r0
-+; V8M-COMMON-NEXT:    mov r11, r0
-+; V8M-COMMON-NEXT:    mov r12, r0
-+; V8M-COMMON-NEXT:    msr apsr_nzcvq, r0
-+; V8M-COMMON-NEXT:    blxns r0
-+; V8M-COMMON-NEXT:    vlldm sp, {d0 - d15}
-+; V8M-COMMON-NEXT:    add sp, #136
-+; V8M-COMMON-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V8M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V8M-COMMON-NEXT:    sbfx r0, r0, #0, #5
-+; V8M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V8M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V8M-COMMON-NEXT:    pop {r7, pc}
-+;
-+; V81M-COMMON-LABEL: access_i5:
-+; V81M-COMMON:       @ %bb.0: @ %entry
-+; V81M-COMMON-NEXT:    push {r7, lr}
-+; V81M-COMMON-NEXT:    movw r0, :lower16:get_idx
-+; V81M-COMMON-NEXT:    movt r0, :upper16:get_idx
-+; V81M-COMMON-NEXT:    ldr r0, [r0]
-+; V81M-COMMON-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V81M-COMMON-NEXT:    bic r0, r0, #1
-+; V81M-COMMON-NEXT:    sub sp, #136
-+; V81M-COMMON-NEXT:    vlstm sp, {d0 - d15}
-+; V81M-COMMON-NEXT:    clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
-+; V81M-COMMON-NEXT:    blxns r0
-+; V81M-COMMON-NEXT:    vlldm sp, {d0 - d15}
-+; V81M-COMMON-NEXT:    add sp, #136
-+; V81M-COMMON-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V81M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V81M-COMMON-NEXT:    sbfx r0, r0, #0, #5
-+; V81M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V81M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V81M-COMMON-NEXT:    pop {r7, pc}
-+entry:
-+  %0 = load ptr, ptr @get_idx, align 4
-+  %call = tail call signext i5 %0() "cmse_nonsecure_call"
-+  %idxprom = sext i5 %call to i32
-+  %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
-+  %1 = load i32, ptr %arrayidx, align 4
-+  ret i32 %1
-+}
-+
-+define i32 @access_u5() {
-+; V8M-COMMON-LABEL: access_u5:
-+; V8M-COMMON:       @ %bb.0: @ %entry
-+; V8M-COMMON-NEXT:    push {r7, lr}
-+; V8M-COMMON-NEXT:    movw r0, :lower16:get_idx
-+; V8M-COMMON-NEXT:    movt r0, :upper16:get_idx
-+; V8M-COMMON-NEXT:    ldr r0, [r0]
-+; V8M-COMMON-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V8M-COMMON-NEXT:    bic r0, r0, #1
-+; V8M-COMMON-NEXT:    sub sp, #136
-+; V8M-COMMON-NEXT:    vlstm sp, {d0 - d15}
-+; V8M-COMMON-NEXT:    mov r1, r0
-+; V8M-COMMON-NEXT:    mov r2, r0
-+; V8M-COMMON-NEXT:    mov r3, r0
-+; V8M-COMMON-NEXT:    mov r4, r0
-+; V8M-COMMON-NEXT:    mov r5, r0
-+; V8M-COMMON-NEXT:    mov r6, r0
-+; V8M-COMMON-NEXT:    mov r7, r0
-+; V8M-COMMON-NEXT:    mov r8, r0
-+; V8M-COMMON-NEXT:    mov r9, r0
-+; V8M-COMMON-NEXT:    mov r10, r0
-+; V8M-COMMON-NEXT:    mov r11, r0
-+; V8M-COMMON-NEXT:    mov r12, r0
-+; V8M-COMMON-NEXT:    msr apsr_nzcvq, r0
-+; V8M-COMMON-NEXT:    blxns r0
-+; V8M-COMMON-NEXT:    vlldm sp, {d0 - d15}
-+; V8M-COMMON-NEXT:    add sp, #136
-+; V8M-COMMON-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V8M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V8M-COMMON-NEXT:    and r0, r0, #31
-+; V8M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V8M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V8M-COMMON-NEXT:    pop {r7, pc}
-+;
-+; V81M-COMMON-LABEL: access_u5:
-+; V81M-COMMON:       @ %bb.0: @ %entry
-+; V81M-COMMON-NEXT:    push {r7, lr}
-+; V81M-COMMON-NEXT:    movw r0, :lower16:get_idx
-+; V81M-COMMON-NEXT:    movt r0, :upper16:get_idx
-+; V81M-COMMON-NEXT:    ldr r0, [r0]
-+; V81M-COMMON-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V81M-COMMON-NEXT:    bic r0, r0, #1
-+; V81M-COMMON-NEXT:    sub sp, #136
-+; V81M-COMMON-NEXT:    vlstm sp, {d0 - d15}
-+; V81M-COMMON-NEXT:    clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
-+; V81M-COMMON-NEXT:    blxns r0
-+; V81M-COMMON-NEXT:    vlldm sp, {d0 - d15}
-+; V81M-COMMON-NEXT:    add sp, #136
-+; V81M-COMMON-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V81M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V81M-COMMON-NEXT:    and r0, r0, #31
-+; V81M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V81M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V81M-COMMON-NEXT:    pop {r7, pc}
-+entry:
-+  %0 = load ptr, ptr @get_idx, align 4
-+  %call = tail call zeroext i5 %0() "cmse_nonsecure_call"
-+  %idxprom = zext i5 %call to i32
-+  %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
-+  %1 = load i32, ptr %arrayidx, align 4
-+  ret i32 %1
-+}
-+
-+define i32 @access_i33(ptr %f) {
-+; V8M-COMMON-LABEL: access_i33:
-+; V8M-COMMON:       @ %bb.0: @ %entry
-+; V8M-COMMON-NEXT:    push {r7, lr}
-+; V8M-COMMON-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V8M-COMMON-NEXT:    bic r0, r0, #1
-+; V8M-COMMON-NEXT:    sub sp, #136
-+; V8M-COMMON-NEXT:    vlstm sp, {d0 - d15}
-+; V8M-COMMON-NEXT:    mov r1, r0
-+; V8M-COMMON-NEXT:    mov r2, r0
-+; V8M-COMMON-NEXT:    mov r3, r0
-+; V8M-COMMON-NEXT:    mov r4, r0
-+; V8M-COMMON-NEXT:    mov r5, r0
-+; V8M-COMMON-NEXT:    mov r6, r0
-+; V8M-COMMON-NEXT:    mov r7, r0
-+; V8M-COMMON-NEXT:    mov r8, r0
-+; V8M-COMMON-NEXT:    mov r9, r0
-+; V8M-COMMON-NEXT:    mov r10, r0
-+; V8M-COMMON-NEXT:    mov r11, r0
-+; V8M-COMMON-NEXT:    mov r12, r0
-+; V8M-COMMON-NEXT:    msr apsr_nzcvq, r0
-+; V8M-COMMON-NEXT:    blxns r0
-+; V8M-COMMON-NEXT:    vlldm sp, {d0 - d15}
-+; V8M-COMMON-NEXT:    add sp, #136
-+; V8M-COMMON-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V8M-LE-NEXT:        and r0, r1, #1
-+; V8M-BE-NEXT:        and r0, r0, #1
-+; V8M-COMMON-NEXT:    rsb.w r0, r0, #0
-+; V8M-COMMON-NEXT:    pop {r7, pc}
-+;
-+; V81M-COMMON-LABEL: access_i33:
-+; V81M-COMMON:       @ %bb.0: @ %entry
-+; V81M-COMMON-NEXT:    push {r7, lr}
-+; V81M-COMMON-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V81M-COMMON-NEXT:    bic r0, r0, #1
-+; V81M-COMMON-NEXT:    sub sp, #136
-+; V81M-COMMON-NEXT:    vlstm sp, {d0 - d15}
-+; V81M-COMMON-NEXT:    clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
-+; V81M-COMMON-NEXT:    blxns r0
-+; V81M-COMMON-NEXT:    vlldm sp, {d0 - d15}
-+; V81M-COMMON-NEXT:    add sp, #136
-+; V81M-COMMON-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V81M-LE-NEXT:        and r0, r1, #1
-+; V81M-BE-NEXT:        and r0, r0, #1
-+; V81M-COMMON-NEXT:    rsb.w r0, r0, #0
-+; V81M-COMMON-NEXT:    pop {r7, pc}
-+entry:
-+  %call = tail call i33 %f() "cmse_nonsecure_call"
-+  %shr = ashr i33 %call, 32
-+  %conv = trunc nsw i33 %shr to i32
-+  ret i32 %conv
-+}
-+
-+define i32 @access_u33(ptr %f) {
-+; V8M-COMMON-LABEL: access_u33:
-+; V8M-COMMON:       @ %bb.0: @ %entry
-+; V8M-COMMON-NEXT:    push {r7, lr}
-+; V8M-COMMON-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V8M-COMMON-NEXT:    bic r0, r0, #1
-+; V8M-COMMON-NEXT:    sub sp, #136
-+; V8M-COMMON-NEXT:    vlstm sp, {d0 - d15}
-+; V8M-COMMON-NEXT:    mov r1, r0
-+; V8M-COMMON-NEXT:    mov r2, r0
-+; V8M-COMMON-NEXT:    mov r3, r0
-+; V8M-COMMON-NEXT:    mov r4, r0
-+; V8M-COMMON-NEXT:    mov r5, r0
-+; V8M-COMMON-NEXT:    mov r6, r0
-+; V8M-COMMON-NEXT:    mov r7, r0
-+; V8M-COMMON-NEXT:    mov r8, r0
-+; V8M-COMMON-NEXT:    mov r9, r0
-+; V8M-COMMON-NEXT:    mov r10, r0
-+; V8M-COMMON-NEXT:    mov r11, r0
-+; V8M-COMMON-NEXT:    mov r12, r0
-+; V8M-COMMON-NEXT:    msr apsr_nzcvq, r0
-+; V8M-COMMON-NEXT:    blxns r0
-+; V8M-COMMON-NEXT:    vlldm sp, {d0 - d15}
-+; V8M-COMMON-NEXT:    add sp, #136
-+; V8M-COMMON-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V8M-LE-NEXT:        and r0, r1, #1
-+; V8M-BE-NEXT:        and r0, r0, #1
-+; V8M-COMMON-NEXT:    pop {r7, pc}
-+;
-+; V81M-COMMON-LABEL: access_u33:
-+; V81M-COMMON:       @ %bb.0: @ %entry
-+; V81M-COMMON-NEXT:    push {r7, lr}
-+; V81M-COMMON-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V81M-COMMON-NEXT:    bic r0, r0, #1
-+; V81M-COMMON-NEXT:    sub sp, #136
-+; V81M-COMMON-NEXT:    vlstm sp, {d0 - d15}
-+; V81M-COMMON-NEXT:    clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
-+; V81M-COMMON-NEXT:    blxns r0
-+; V81M-COMMON-NEXT:    vlldm sp, {d0 - d15}
-+; V81M-COMMON-NEXT:    add sp, #136
-+; V81M-COMMON-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
-+; V81M-LE-NEXT:        and r0, r1, #1
-+; V81M-BE-NEXT:        and r0, r0, #1
-+; V81M-COMMON-NEXT:    pop {r7, pc}
-+entry:
-+  %call = tail call i33 %f() "cmse_nonsecure_call"
-+  %shr = lshr i33 %call, 32
-+  %conv = trunc nuw nsw i33 %shr to i32
-+  ret i32 %conv
-+}
-diff --git a/llvm/test/CodeGen/ARM/cmse-harden-entry-arguments.ll b/llvm/test/CodeGen/ARM/cmse-harden-entry-arguments.ll
-new file mode 100644
-index 0000000000..c66ab00566dd
---- /dev/null
-+++ b/llvm/test/CodeGen/ARM/cmse-harden-entry-arguments.ll
-@@ -0,0 +1,368 @@
-+; RUN: llc %s -mtriple=thumbv8m.main     -o - | FileCheck %s --check-prefixes V8M-COMMON,V8M-LE
-+; RUN: llc %s -mtriple=thumbebv8m.main   -o - | FileCheck %s --check-prefixes V8M-COMMON,V8M-BE
-+; RUN: llc %s -mtriple=thumbv8.1m.main   -o - | FileCheck %s --check-prefixes V81M-COMMON,V81M-LE
-+; RUN: llc %s -mtriple=thumbebv8.1m.main -o - | FileCheck %s --check-prefixes V81M-COMMON,V81M-BE
-+
-+@arr = hidden local_unnamed_addr global [256 x i32] zeroinitializer, align 4
-+
-+define i32 @access_i16(i16 signext %idx) "cmse_nonsecure_entry" {
-+; V8M-COMMON-LABEL: access_i16:
-+; V8M-COMMON:       @ %bb.0: @ %entry
-+; V8M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V8M-COMMON-NEXT:    sxth r0, r0
-+; V8M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V8M-COMMON-NEXT:    mov r2, lr
-+; V8M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V8M-COMMON-NEXT:    mov r1, lr
-+; V8M-COMMON-NEXT:    mov r3, lr
-+; V8M-COMMON-NEXT:    msr apsr_nzcvq, lr
-+; V8M-COMMON-NEXT:    mov r12, lr
-+; V8M-COMMON-NEXT:    bxns lr
-+;
-+; V81M-COMMON-LABEL: access_i16:
-+; V81M-COMMON:       @ %bb.0: @ %entry
-+; V81M-COMMON-NEXT:    vstr fpcxtns, [sp, #-4]!
-+; V81M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V81M-COMMON-NEXT:    sxth r0, r0
-+; V81M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V81M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V81M-COMMON-NEXT:    vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr}
-+; V81M-COMMON-NEXT:    vldr fpcxtns, [sp], #4
-+; V81M-COMMON-NEXT:    clrm {r1, r2, r3, r12, apsr}
-+; V81M-COMMON-NEXT:    bxns lr
-+entry:
-+  %idxprom = sext i16 %idx to i32
-+  %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
-+  %0 = load i32, ptr %arrayidx, align 4
-+  ret i32 %0
-+}
-+
-+define i32 @access_u16(i16 zeroext %idx) "cmse_nonsecure_entry" {
-+; V8M-COMMON-LABEL: access_u16:
-+; V8M-COMMON:       @ %bb.0: @ %entry
-+; V8M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V8M-COMMON-NEXT:    uxth r0, r0
-+; V8M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V8M-COMMON-NEXT:    mov r2, lr
-+; V8M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V8M-COMMON-NEXT:    mov r1, lr
-+; V8M-COMMON-NEXT:    mov r3, lr
-+; V8M-COMMON-NEXT:    msr apsr_nzcvq, lr
-+; V8M-COMMON-NEXT:    mov r12, lr
-+; V8M-COMMON-NEXT:    bxns lr
-+;
-+; V81M-COMMON-LABEL: access_u16:
-+; V81M-COMMON:       @ %bb.0: @ %entry
-+; V81M-COMMON-NEXT:    vstr fpcxtns, [sp, #-4]!
-+; V81M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V81M-COMMON-NEXT:    uxth r0, r0
-+; V81M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V81M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V81M-COMMON-NEXT:    vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr}
-+; V81M-COMMON-NEXT:    vldr fpcxtns, [sp], #4
-+; V81M-COMMON-NEXT:    clrm {r1, r2, r3, r12, apsr}
-+; V81M-COMMON-NEXT:    bxns lr
-+entry:
-+  %idxprom = zext i16 %idx to i32
-+  %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
-+  %0 = load i32, ptr %arrayidx, align 4
-+  ret i32 %0
-+}
-+
-+define i32 @access_i8(i8 signext %idx) "cmse_nonsecure_entry" {
-+; V8M-COMMON-LABEL: access_i8:
-+; V8M-COMMON:       @ %bb.0: @ %entry
-+; V8M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V8M-COMMON-NEXT:    sxtb r0, r0
-+; V8M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V8M-COMMON-NEXT:    mov r2, lr
-+; V8M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V8M-COMMON-NEXT:    mov r1, lr
-+; V8M-COMMON-NEXT:    mov r3, lr
-+; V8M-COMMON-NEXT:    msr apsr_nzcvq, lr
-+; V8M-COMMON-NEXT:    mov r12, lr
-+; V8M-COMMON-NEXT:    bxns lr
-+;
-+; V81M-COMMON-LABEL: access_i8:
-+; V81M-COMMON:       @ %bb.0: @ %entry
-+; V81M-COMMON-NEXT:    vstr fpcxtns, [sp, #-4]!
-+; V81M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V81M-COMMON-NEXT:    sxtb r0, r0
-+; V81M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V81M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V81M-COMMON-NEXT:    vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr}
-+; V81M-COMMON-NEXT:    vldr fpcxtns, [sp], #4
-+; V81M-COMMON-NEXT:    clrm {r1, r2, r3, r12, apsr}
-+; V81M-COMMON-NEXT:    bxns lr
-+entry:
-+  %idxprom = sext i8 %idx to i32
-+  %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
-+  %0 = load i32, ptr %arrayidx, align 4
-+  ret i32 %0
-+}
-+
-+define i32 @access_u8(i8 zeroext %idx) "cmse_nonsecure_entry" {
-+; V8M-COMMON-LABEL: access_u8:
-+; V8M-COMMON:       @ %bb.0: @ %entry
-+; V8M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V8M-COMMON-NEXT:    uxtb r0, r0
-+; V8M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V8M-COMMON-NEXT:    mov r2, lr
-+; V8M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V8M-COMMON-NEXT:    mov r1, lr
-+; V8M-COMMON-NEXT:    mov r3, lr
-+; V8M-COMMON-NEXT:    msr apsr_nzcvq, lr
-+; V8M-COMMON-NEXT:    mov r12, lr
-+; V8M-COMMON-NEXT:    bxns lr
-+;
-+; V81M-COMMON-LABEL: access_u8:
-+; V81M-COMMON:       @ %bb.0: @ %entry
-+; V81M-COMMON-NEXT:    vstr fpcxtns, [sp, #-4]!
-+; V81M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V81M-COMMON-NEXT:    uxtb r0, r0
-+; V81M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V81M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V81M-COMMON-NEXT:    vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr}
-+; V81M-COMMON-NEXT:    vldr fpcxtns, [sp], #4
-+; V81M-COMMON-NEXT:    clrm {r1, r2, r3, r12, apsr}
-+; V81M-COMMON-NEXT:    bxns lr
-+entry:
-+  %idxprom = zext i8 %idx to i32
-+  %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
-+  %0 = load i32, ptr %arrayidx, align 4
-+  ret i32 %0
-+}
-+
-+define i32 @access_i1(i1 signext %idx) "cmse_nonsecure_entry" {
-+; V8M-COMMON-LABEL: access_i1:
-+; V8M-COMMON:       @ %bb.0: @ %entry
-+; V8M-COMMON-NEXT:    and r0, r0, #1
-+; V8M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V8M-COMMON-NEXT:    rsbs r0, r0, #0
-+; V8M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V8M-COMMON-NEXT:    and r0, r0, #1
-+; V8M-COMMON-NEXT:    mov r2, lr
-+; V8M-COMMON-NEXT:    mov r3, lr
-+; V8M-COMMON-NEXT:    mov r12, lr
-+; V8M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V8M-COMMON-NEXT:    mov r1, lr
-+; V8M-COMMON-NEXT:    msr apsr_nzcvq, lr
-+; V8M-COMMON-NEXT:    bxns lr
-+;
-+; V81M-COMMON-LABEL: access_i1:
-+; V81M-COMMON:       @ %bb.0: @ %entry
-+; V81M-COMMON-NEXT:    vstr fpcxtns, [sp, #-4]!
-+; V81M-COMMON-NEXT:    and r0, r0, #1
-+; V81M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V81M-COMMON-NEXT:    rsbs r0, r0, #0
-+; V81M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V81M-COMMON-NEXT:    and r0, r0, #1
-+; V81M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V81M-COMMON-NEXT:    vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr}
-+; V81M-COMMON-NEXT:    vldr fpcxtns, [sp], #4
-+; V81M-COMMON-NEXT:    clrm {r1, r2, r3, r12, apsr}
-+; V81M-COMMON-NEXT:    bxns lr
-+entry:
-+  %idxprom = zext i1 %idx to i32
-+  %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
-+  %0 = load i32, ptr %arrayidx, align 4
-+  ret i32 %0
-+}
-+
-+define i32 @access_i5(i5 signext %idx) "cmse_nonsecure_entry" {
-+; V8M-COMMON-LABEL: access_i5:
-+; V8M-COMMON:       @ %bb.0: @ %entry
-+; V8M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V8M-COMMON-NEXT:    sbfx r0, r0, #0, #5
-+; V8M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V8M-COMMON-NEXT:    mov r2, lr
-+; V8M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V8M-COMMON-NEXT:    mov r1, lr
-+; V8M-COMMON-NEXT:    mov r3, lr
-+; V8M-COMMON-NEXT:    msr apsr_nzcvq, lr
-+; V8M-COMMON-NEXT:    mov r12, lr
-+; V8M-COMMON-NEXT:    bxns lr
-+;
-+; V81M-COMMON-LABEL: access_i5:
-+; V81M-COMMON:       @ %bb.0: @ %entry
-+; V81M-COMMON-NEXT:    vstr fpcxtns, [sp, #-4]!
-+; V81M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V81M-COMMON-NEXT:    sbfx r0, r0, #0, #5
-+; V81M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V81M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V81M-COMMON-NEXT:    vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr}
-+; V81M-COMMON-NEXT:    vldr fpcxtns, [sp], #4
-+; V81M-COMMON-NEXT:    clrm {r1, r2, r3, r12, apsr}
-+; V81M-COMMON-NEXT:    bxns lr
-+entry:
-+  %idxprom = sext i5 %idx to i32
-+  %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
-+  %0 = load i32, ptr %arrayidx, align 4
-+  ret i32 %0
-+}
-+
-+define i32 @access_u5(i5 zeroext %idx) "cmse_nonsecure_entry" {
-+; V8M-COMMON-LABEL: access_u5:
-+; V8M-COMMON:       @ %bb.0: @ %entry
-+; V8M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V8M-COMMON-NEXT:    and r0, r0, #31
-+; V8M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V8M-COMMON-NEXT:    mov r2, lr
-+; V8M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V8M-COMMON-NEXT:    mov r1, lr
-+; V8M-COMMON-NEXT:    mov r3, lr
-+; V8M-COMMON-NEXT:    msr apsr_nzcvq, lr
-+; V8M-COMMON-NEXT:    mov r12, lr
-+; V8M-COMMON-NEXT:    bxns lr
-+;
-+; V81M-COMMON-LABEL: access_u5:
-+; V81M-COMMON:       @ %bb.0: @ %entry
-+; V81M-COMMON-NEXT:    vstr fpcxtns, [sp, #-4]!
-+; V81M-COMMON-NEXT:    movw r1, :lower16:arr
-+; V81M-COMMON-NEXT:    and r0, r0, #31
-+; V81M-COMMON-NEXT:    movt r1, :upper16:arr
-+; V81M-COMMON-NEXT:    ldr.w r0, [r1, r0, lsl #2]
-+; V81M-COMMON-NEXT:    vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr}
-+; V81M-COMMON-NEXT:    vldr fpcxtns, [sp], #4
-+; V81M-COMMON-NEXT:    clrm {r1, r2, r3, r12, apsr}
-+; V81M-COMMON-NEXT:    bxns lr
-+entry:
-+  %idxprom = zext i5 %idx to i32
-+  %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
-+  %0 = load i32, ptr %arrayidx, align 4
-+  ret i32 %0
-+}
-+
-+define i32 @access_i33(i33 %arg) "cmse_nonsecure_entry" {
-+; V8M-COMMON-LABEL: access_i33:
-+; V8M-COMMON:       @ %bb.0: @ %entry
-+; V8M-LE-NEXT:        and r0, r1, #1
-+; V8M-BE-NEXT:        and r0, r0, #1
-+; V8M-COMMON-NEXT:    mov r1, lr
-+; V8M-COMMON-NEXT:    rsbs r0, r0, #0
-+; V8M-COMMON-NEXT:    mov r2, lr
-+; V8M-COMMON-NEXT:    mov r3, lr
-+; V8M-COMMON-NEXT:    mov r12, lr
-+; V8M-COMMON-NEXT:    msr apsr_nzcvq, lr
-+; V8M-COMMON-NEXT:    bxns lr
-+;
-+; V81M-COMMON-LABEL: access_i33:
-+; V81M-COMMON:       @ %bb.0: @ %entry
-+; V81M-COMMON-NEXT:    vstr fpcxtns, [sp, #-4]!
-+; V81M-LE-NEXT:        and r0, r1, #1
-+; V81M-BE-NEXT:        and r0, r0, #1
-+; V81M-COMMON-NEXT:    vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr}
-+; V81M-COMMON-NEXT:    rsbs r0, r0, #0
-+; V81M-COMMON-NEXT:    vldr fpcxtns, [sp], #4
-+; V81M-COMMON-NEXT:    clrm {r1, r2, r3, r12, apsr}
-+; V81M-COMMON-NEXT:    bxns lr
-+entry:
-+  %shr = ashr i33 %arg, 32
-+  %conv = trunc nsw i33 %shr to i32
-+  ret i32 %conv
-+}
-+
-+define i32 @access_u33(i33 %arg) "cmse_nonsecure_entry" {
-+; V8M-COMMON-LABEL: access_u33:
-+; V8M-COMMON:       @ %bb.0: @ %entry
-+; V8M-LE-NEXT:        and r0, r1, #1
-+; V8M-BE-NEXT:        and r0, r0, #1
-+; V8M-COMMON-NEXT:    mov r1, lr
-+; V8M-COMMON-NEXT:    mov r2, lr
-+; V8M-COMMON-NEXT:    mov r3, lr
-+; V8M-COMMON-NEXT:    mov r12, lr
-+; V8M-COMMON-NEXT:    msr apsr_nzcvq, lr
-+; V8M-COMMON-NEXT:    bxns lr
-+;
-+; V81M-COMMON-LABEL: access_u33:
-+; V81M-COMMON:       @ %bb.0: @ %entry
-+; V81M-COMMON-NEXT:    vstr fpcxtns, [sp, #-4]!
-+; V81M-LE-NEXT:        and r0, r1, #1
-+; V81M-BE-NEXT:        and r0, r0, #1
-+; V81M-COMMON-NEXT:    vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr}
-+; V81M-COMMON-NEXT:    vldr fpcxtns, [sp], #4
-+; V81M-COMMON-NEXT:    clrm {r1, r2, r3, r12, apsr}
-+; V81M-COMMON-NEXT:    bxns lr
-+entry:
-+  %shr = lshr i33 %arg, 32
-+  %conv = trunc nuw nsw i33 %shr to i32
-+  ret i32 %conv
-+}
-+
-+define i32 @access_i65(ptr byval(i65) %0) "cmse_nonsecure_entry" {
-+; V8M-COMMON-LABEL: access_i65:
-+; V8M-COMMON:       @ %bb.0: @ %entry
-+; V8M-COMMON-NEXT:    sub sp, #16
-+; V8M-COMMON-NEXT:    stm.w sp, {r0, r1, r2, r3}
-+; V8M-LE-NEXT:        ldrb.w r0, [sp, #8]
-+; V8M-LE-NEXT:        and r0, r0, #1
-+; V8M-LE-NEXT:        rsbs r0, r0, #0
-+; V8M-BE-NEXT:        movs r1, #0
-+; V8M-BE-NEXT:        sub.w r0, r1, r0, lsr #24
-+; V8M-COMMON-NEXT:    add sp, #16
-+; V8M-COMMON-NEXT:    mov r1, lr
-+; V8M-COMMON-NEXT:    mov r2, lr
-+; V8M-COMMON-NEXT:    mov r3, lr
-+; V8M-COMMON-NEXT:    mov r12, lr
-+; V8M-COMMON-NEXT:    msr apsr_nzcvq, lr
-+; V8M-COMMON-NEXT:    bxns lr
-+;
-+; V81M-COMMON-LABEL: access_i65:
-+; V81M-COMMON:       @ %bb.0: @ %entry
-+; V81M-COMMON-NEXT:    vstr fpcxtns, [sp, #-4]!
-+; V81M-COMMON-NEXT:    sub sp, #16
-+; V81M-COMMON-NEXT:    add sp, #4
-+; V81M-COMMON-NEXT:    stm.w sp, {r0, r1, r2, r3}
-+; V81M-LE-NEXT:        ldrb.w r0, [sp, #8]
-+; V81M-LE-NEXT:        and r0, r0, #1
-+; V81M-LE-NEXT:        rsbs r0, r0, #0
-+; V81M-BE-NEXT:        movs r1, #0
-+; V81M-BE-NEXT:        sub.w r0, r1, r0, lsr #24
-+; V81M-COMMON-NEXT:    sub sp, #4
-+; V81M-COMMON-NEXT:    add sp, #16
-+; V81M-COMMON-NEXT:    vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr}
-+; V81M-COMMON-NEXT:    vldr fpcxtns, [sp], #4
-+; V81M-COMMON-NEXT:    clrm {r1, r2, r3, r12, apsr}
-+; V81M-COMMON-NEXT:    bxns lr
-+entry:
-+  %arg = load i65, ptr %0, align 8
-+  %shr = ashr i65 %arg, 64
-+  %conv = trunc nsw i65 %shr to i32
-+  ret i32 %conv
-+}
-+
-+define i32 @access_u65(ptr byval(i65) %0) "cmse_nonsecure_entry" {
-+; V8M-COMMON-LABEL: access_u65:
-+; V8M-COMMON:       @ %bb.0: @ %entry
-+; V8M-COMMON-NEXT:    sub sp, #16
-+; V8M-COMMON-NEXT:    stm.w sp, {r0, r1, r2, r3}
-+; V8M-LE-NEXT:        ldrb.w r0, [sp, #8]
-+; V8M-BE-NEXT:        lsrs r0, r0, #24
-+; V8M-COMMON-NEXT:    add sp, #16
-+; V8M-COMMON-NEXT:    mov r1, lr
-+; V8M-COMMON-NEXT:    mov r2, lr
-+; V8M-COMMON-NEXT:    mov r3, lr
-+; V8M-COMMON-NEXT:    mov r12, lr
-+; V8M-COMMON-NEXT:    msr apsr_nzcvq, lr
-+; V8M-COMMON-NEXT:    bxns lr
-+;
-+; V81M-COMMON-LABEL: access_u65:
-+; V81M-COMMON:       @ %bb.0: @ %entry
-+; V81M-COMMON-NEXT:    vstr fpcxtns, [sp, #-4]!
-+; V81M-COMMON-NEXT:    sub sp, #16
-+; V81M-COMMON-NEXT:    add sp, #4
-+; V81M-COMMON-NEXT:    stm.w sp, {r0, r1, r2, r3}
-+; V81M-LE-NEXT:        ldrb.w r0, [sp, #8]
-+; V81M-BE-NEXT:        lsrs r0, r0, #24
-+; V81M-COMMON-NEXT:    sub sp, #4
-+; V81M-COMMON-NEXT:    add sp, #16
-+; V81M-COMMON-NEXT:    vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr}
-+; V81M-COMMON-NEXT:    vldr fpcxtns, [sp], #4
-+; V81M-COMMON-NEXT:    clrm {r1, r2, r3, r12, apsr}
-+; V81M-COMMON-NEXT:    bxns lr
-+entry:
-+  %arg = load i65, ptr %0, align 8
-+  %shr = lshr i65 %arg, 64
-+  %conv = trunc nuw nsw i65 %shr to i32
-+  ret i32 %conv
-+}
diff --git a/meta/recipes-devtools/llvm/llvm_18.1.8.bb b/meta/recipes-devtools/llvm/llvm_19.1.0.bb
similarity index 97%
rename from meta/recipes-devtools/llvm/llvm_18.1.8.bb
rename to meta/recipes-devtools/llvm/llvm_19.1.0.bb
index bf8d869662..35e7f4c07f 100644
--- a/meta/recipes-devtools/llvm/llvm_18.1.8.bb
+++ b/meta/recipes-devtools/llvm/llvm_19.1.0.bb
@@ -25,10 +25,9 @@  LLVM_RELEASE = "${PV}"
 SRC_URI = "https://github.com/llvm/llvm-project/releases/download/llvmorg-${PV}/llvm-project-${PV}.src.tar.xz \
            file://0007-llvm-allow-env-override-of-exe-path.patch;striplevel=2 \
            file://0001-AsmMatcherEmitter-sort-ClassInfo-lists-by-name-as-we.patch;striplevel=2 \
-           file://0002-llvm-Fix-CVE-2024-0151.patch;striplevel=2 \
            file://llvm-config \
            "
-SRC_URI[sha256sum] = "0b58557a6d32ceee97c8d533a59b9212d87e0fc4d2833924eb6c611247db2f2a"
+SRC_URI[sha256sum] = "5042522b49945bc560ff9206f25fb87980a9b89b914193ca00d961511ff0673c"
 UPSTREAM_CHECK_URI = "https://github.com/llvm/llvm-project"
 UPSTREAM_CHECK_REGEX = "llvmorg-(?P<pver>\d+(\.\d+)+)"