From f0c55418e2b09eaab37c820d3756cc1b4584d084 Mon Sep 17 00:00:00 2001 From: Dimitry Andric Date: Apr 26 2017 19:24:09 +0000 Subject: Vendor import of clang trunk r301441: https://llvm.org/svn/llvm-project/cfe/trunk@301441 --- diff --git a/cmake/caches/Fuchsia-stage2.cmake b/cmake/caches/Fuchsia-stage2.cmake index 2e1e9a1..ca43e60 100644 --- a/cmake/caches/Fuchsia-stage2.cmake +++ b/cmake/caches/Fuchsia-stage2.cmake @@ -1,7 +1,7 @@ # This file sets up a CMakeCache for the second stage of a Fuchsia toolchain # build. -set(LLVM_TARGETS_TO_BUILD X86;AArch64 CACHE STRING "") +set(LLVM_TARGETS_TO_BUILD X86;ARM;AArch64 CACHE STRING "") set(PACKAGE_VENDOR Fuchsia CACHE STRING "") @@ -36,6 +36,7 @@ set(BUILTINS_aarch64-fuchsia-none_CMAKE_SYSTEM_NAME Fuchsia CACHE STRING "") # Setup toolchain. set(LLVM_INSTALL_TOOLCHAIN_ONLY ON CACHE BOOL "") set(LLVM_TOOLCHAIN_TOOLS + llc llvm-ar llvm-cov llvm-cxxfilt @@ -49,6 +50,7 @@ set(LLVM_TOOLCHAIN_TOOLS llvm-readobj llvm-size llvm-symbolizer + opt CACHE STRING "") set(LLVM_DISTRIBUTION_COMPONENTS diff --git a/docs/ClangFormat.rst b/docs/ClangFormat.rst index dc1e2b9..ed0e58e 100644 --- a/docs/ClangFormat.rst +++ b/docs/ClangFormat.rst @@ -162,8 +162,9 @@ Download the latest Visual Studio extension from the `alpha build site Script for patch reformatting ============================= -The python script `clang/tools/clang-format-diff.py` parses the output of -a unified diff and reformats all contained lines with :program:`clang-format`. +The python script `clang/tools/clang-format/clang-format-diff.py` parses the +output of a unified diff and reformats all contained lines with +:program:`clang-format`. .. 
code-block:: console diff --git a/include/clang-c/Index.h b/include/clang-c/Index.h index 1ac2b7f..9acc9b7 100644 --- a/include/clang-c/Index.h +++ b/include/clang-c/Index.h @@ -5600,7 +5600,8 @@ typedef enum { CXIdxEntityLang_None = 0, CXIdxEntityLang_C = 1, CXIdxEntityLang_ObjC = 2, - CXIdxEntityLang_CXX = 3 + CXIdxEntityLang_CXX = 3, + CXIdxEntityLang_Swift = 4 } CXIdxEntityLanguage; /** diff --git a/include/clang/AST/CommentSema.h b/include/clang/AST/CommentSema.h index 6a80383..230e527 100644 --- a/include/clang/AST/CommentSema.h +++ b/include/clang/AST/CommentSema.h @@ -208,6 +208,10 @@ public: /// \returns \c true if declaration that this comment is attached to declares /// a function pointer. bool isFunctionPointerVarDecl(); + /// \returns \c true if the declaration that this comment is attached to + /// declares a variable or a field whose type is a function or a block + /// pointer. + bool isFunctionOrBlockPointerVarLikeDecl(); bool isFunctionOrMethodVariadic(); bool isObjCMethodDecl(); bool isObjCPropertyDecl(); diff --git a/include/clang/ASTMatchers/ASTMatchers.h b/include/clang/ASTMatchers/ASTMatchers.h index fb02947..f11469b 100644 --- a/include/clang/ASTMatchers/ASTMatchers.h +++ b/include/clang/ASTMatchers/ASTMatchers.h @@ -1406,7 +1406,7 @@ const internal::VariadicDynCastAllOfMatcher< /// /// Example: Given /// \code -/// struct T {void func()}; +/// struct T {void func();}; /// T f(); /// void g(T); /// \endcode diff --git a/include/clang/Basic/Diagnostic.td b/include/clang/Basic/Diagnostic.td index 39da006..f25068e 100644 --- a/include/clang/Basic/Diagnostic.td +++ b/include/clang/Basic/Diagnostic.td @@ -12,6 +12,8 @@ // //===----------------------------------------------------------------------===// +// See the Internals Manual, section The Diagnostics Subsystem for an overview. + // Define the diagnostic severities. 
class Severity { string Name = N; @@ -100,10 +102,20 @@ class SuppressInSystemHeader { class Error : Diagnostic, SFINAEFailure { bit ShowInSystemHeader = 1; } +// Warnings default to on (but can be default-off'd with DefaultIgnore). +// This is used for warnings about questionable code; warnings about +// accepted language extensions should use Extension or ExtWarn below instead. class Warning : Diagnostic; +// Remarks can be turned on with -R flags and provide commentary, e.g. on +// optimizer decisions. class Remark : Diagnostic; +// Extensions are warnings about accepted language extensions. +// Extension warnings are default-off but enabled by -pedantic. class Extension : Diagnostic; +// ExtWarns are warnings about accepted language extensions. +// ExtWarn warnings are default-on. class ExtWarn : Diagnostic; +// Notes can provide supplementary information on errors, warnings, and remarks. class Note : Diagnostic; diff --git a/include/clang/Basic/DiagnosticParseKinds.td b/include/clang/Basic/DiagnosticParseKinds.td index d95e43c..f04ed8e 100644 --- a/include/clang/Basic/DiagnosticParseKinds.td +++ b/include/clang/Basic/DiagnosticParseKinds.td @@ -1114,14 +1114,12 @@ def err_pragma_cannot_end_force_cuda_host_device : Error< } // end of Parse Issue category. 
let CategoryName = "Modules Issue" in { -def err_expected_module_interface_decl : Error< - "expected module declaration at start of module interface">; def err_unexpected_module_decl : Error< - "module declaration must be the first declaration in the translation unit">; + "module declaration can only appear at the top level">; def err_module_expected_ident : Error< - "expected a module name after module%select{| import}0">; -def err_unexpected_module_kind : Error< - "unexpected module kind %0; expected 'implementation' or 'partition'">; + "expected a module name after '%select{module|import}0'">; +def err_module_implementation_partition : Error< + "module partition must be declared 'export'">; def err_attribute_not_module_attr : Error< "%0 attribute cannot be applied to a module">; def err_attribute_not_import_attr : Error< diff --git a/include/clang/Basic/DiagnosticSemaKinds.td b/include/clang/Basic/DiagnosticSemaKinds.td index 6cb872c..b5fed1f 100644 --- a/include/clang/Basic/DiagnosticSemaKinds.td +++ b/include/clang/Basic/DiagnosticSemaKinds.td @@ -8801,9 +8801,11 @@ def err_invalid_type_for_program_scope_var : Error< } let CategoryName = "Modules Issue" in { +def err_module_decl_in_module_map_module : Error< + "'module' declaration found while building module from module map">; def err_module_interface_implementation_mismatch : Error< - "%select{'module'|'module partition'|'module implementation'}0 declaration " - "found while %select{not |not |}0building module interface">; + "missing 'export' specifier in module declaration while " + "building module interface">; def err_current_module_name_mismatch : Error< "module name '%0' specified on command line does not match name of module">; def err_module_redefinition : Error< @@ -8846,8 +8848,13 @@ def err_module_self_import : Error< "import of module '%0' appears within same top-level module '%1'">; def err_module_import_in_implementation : Error< "@import of module '%0' in implementation of '%1'; use #import">; 
+ +// C++ Modules TS def err_export_within_export : Error< "export declaration appears within another export declaration">; +def err_export_not_in_module_interface : Error< + "export declaration can only be used within a module interface unit after " + "the module declaration">; def ext_equivalent_internal_linkage_decl_in_modules : ExtWarn< "ambiguous use of internal linkage declaration %0 defined in multiple modules">, diff --git a/include/clang/Basic/Module.h b/include/clang/Basic/Module.h index 8fcb0e8..28aa7db 100644 --- a/include/clang/Basic/Module.h +++ b/include/clang/Basic/Module.h @@ -62,6 +62,18 @@ public: /// \brief The location of the module definition. SourceLocation DefinitionLoc; + enum ModuleKind { + /// \brief This is a module that was defined by a module map and built out + /// of header files. + ModuleMapModule, + + /// \brief This is a C++ Modules TS module interface unit. + ModuleInterfaceUnit + }; + + /// \brief The kind of this module. + ModuleKind Kind = ModuleMapModule; + /// \brief The parent of this module. This will be NULL for the top-level /// module. 
Module *Parent; diff --git a/include/clang/Basic/Sanitizers.def b/include/clang/Basic/Sanitizers.def index c574045..f20d326 100644 --- a/include/clang/Basic/Sanitizers.def +++ b/include/clang/Basic/Sanitizers.def @@ -47,6 +47,9 @@ SANITIZER("kernel-address", KernelAddress) // MemorySanitizer SANITIZER("memory", Memory) +// libFuzzer +SANITIZER("fuzzer", Fuzzer) + // ThreadSanitizer SANITIZER("thread", Thread) diff --git a/include/clang/Driver/CC1Options.td b/include/clang/Driver/CC1Options.td index ba09bcc..63348c0 100644 --- a/include/clang/Driver/CC1Options.td +++ b/include/clang/Driver/CC1Options.td @@ -573,6 +573,8 @@ def fblocks_runtime_optional : Flag<["-"], "fblocks-runtime-optional">, HelpText<"Weakly link in the blocks runtime">; def fexternc_nounwind : Flag<["-"], "fexternc-nounwind">, HelpText<"Assume all functions with C linkage do not unwind">; +def enable_split_dwarf : Flag<["-"], "enable-split-dwarf">, + HelpText<"Use split dwarf/Fission">; def split_dwarf_file : Separate<["-"], "split-dwarf-file">, HelpText<"File name to use for split dwarf debug info output">; def fno_wchar : Flag<["-"], "fno-wchar">, diff --git a/include/clang/Driver/SanitizerArgs.h b/include/clang/Driver/SanitizerArgs.h index 2df8077..c7b3e80 100644 --- a/include/clang/Driver/SanitizerArgs.h +++ b/include/clang/Driver/SanitizerArgs.h @@ -50,6 +50,7 @@ class SanitizerArgs { bool needsSharedAsanRt() const { return AsanSharedRuntime; } bool needsTsanRt() const { return Sanitizers.has(SanitizerKind::Thread); } bool needsMsanRt() const { return Sanitizers.has(SanitizerKind::Memory); } + bool needsFuzzer() const { return Sanitizers.has(SanitizerKind::Fuzzer); } bool needsLsanRt() const { return Sanitizers.has(SanitizerKind::Leak) && !Sanitizers.has(SanitizerKind::Address); diff --git a/include/clang/Format/Format.h b/include/clang/Format/Format.h index 05daf4f..9bed253 100644 --- a/include/clang/Format/Format.h +++ b/include/clang/Format/Format.h @@ -1512,6 +1512,18 @@ llvm::Expected 
cleanupAroundReplacements(StringRef Code, const tooling::Replacements &Replaces, const FormatStyle &Style); +/// \brief Represents the status of a formatting attempt. +struct FormattingAttemptStatus { + /// \brief A value of ``false`` means that any of the affected ranges were not + /// formatted due to a non-recoverable syntax error. + bool FormatComplete = true; + + /// \brief If ``FormatComplete`` is false, ``Line`` records a one-based + /// original line number at which a syntax error might have occurred. This is + /// based on a best-effort analysis and could be imprecise. + unsigned Line = 0; +}; + /// \brief Reformats the given \p Ranges in \p Code. /// /// Each range is extended on either end to its next bigger logic unit, i.e. @@ -1521,13 +1533,20 @@ cleanupAroundReplacements(StringRef Code, const tooling::Replacements &Replaces, /// Returns the ``Replacements`` necessary to make all \p Ranges comply with /// \p Style. /// -/// If ``IncompleteFormat`` is non-null, its value will be set to true if any -/// of the affected ranges were not formatted due to a non-recoverable syntax -/// error. +/// If ``Status`` is non-null, its value will be populated with the status of +/// this formatting attempt. See \c FormattingAttemptStatus. tooling::Replacements reformat(const FormatStyle &Style, StringRef Code, ArrayRef Ranges, StringRef FileName = "", - bool *IncompleteFormat = nullptr); + FormattingAttemptStatus *Status = nullptr); + +/// \brief Same as above, except if ``IncompleteFormat`` is non-null, its value +/// will be set to true if any of the affected ranges were not formatted due to +/// a non-recoverable syntax error. +tooling::Replacements reformat(const FormatStyle &Style, StringRef Code, + ArrayRef Ranges, + StringRef FileName, + bool *IncompleteFormat); /// \brief Clean up any erroneous/redundant code in the given \p Ranges in \p /// Code. 
diff --git a/include/clang/Frontend/CodeGenOptions.def b/include/clang/Frontend/CodeGenOptions.def index 9565b22..0b7d466 100644 --- a/include/clang/Frontend/CodeGenOptions.def +++ b/include/clang/Frontend/CodeGenOptions.def @@ -199,6 +199,7 @@ CODEGENOPT(DebugTypeExtRefs, 1, 0) ///< Whether or not debug info should contain CODEGENOPT(DebugExplicitImport, 1, 0) ///< Whether or not debug info should ///< contain explicit imports for ///< anonymous namespaces +CODEGENOPT(EnableSplitDwarf, 1, 0) ///< Whether to enable split DWARF CODEGENOPT(SplitDwarfInlining, 1, 1) ///< Whether to include inlining info in the ///< skeleton CU to allow for symbolication ///< of inline stack frames without .dwo files. diff --git a/include/clang/Index/IndexSymbol.h b/include/clang/Index/IndexSymbol.h index bc34938..abb132f 100644 --- a/include/clang/Index/IndexSymbol.h +++ b/include/clang/Index/IndexSymbol.h @@ -59,6 +59,7 @@ enum class SymbolLanguage { C, ObjC, CXX, + Swift, }; /// Language specific sub-kinds. diff --git a/include/clang/Index/USRGeneration.h b/include/clang/Index/USRGeneration.h index 61f2c9d..8c661bd 100644 --- a/include/clang/Index/USRGeneration.h +++ b/include/clang/Index/USRGeneration.h @@ -30,10 +30,14 @@ static inline StringRef getUSRSpacePrefix() { bool generateUSRForDecl(const Decl *D, SmallVectorImpl &Buf); /// \brief Generate a USR fragment for an Objective-C class. -void generateUSRForObjCClass(StringRef Cls, raw_ostream &OS); +void generateUSRForObjCClass(StringRef Cls, raw_ostream &OS, + StringRef ExtSymbolDefinedIn = "", + StringRef CategoryContextExtSymbolDefinedIn = ""); /// \brief Generate a USR fragment for an Objective-C class category. -void generateUSRForObjCCategory(StringRef Cls, StringRef Cat, raw_ostream &OS); +void generateUSRForObjCCategory(StringRef Cls, StringRef Cat, raw_ostream &OS, + StringRef ClsExtSymbolDefinedIn = "", + StringRef CatExtSymbolDefinedIn = ""); /// \brief Generate a USR fragment for an Objective-C instance variable. 
The /// complete USR can be created by concatenating the USR for the @@ -48,7 +52,15 @@ void generateUSRForObjCMethod(StringRef Sel, bool IsInstanceMethod, void generateUSRForObjCProperty(StringRef Prop, bool isClassProp, raw_ostream &OS); /// \brief Generate a USR fragment for an Objective-C protocol. -void generateUSRForObjCProtocol(StringRef Prot, raw_ostream &OS); +void generateUSRForObjCProtocol(StringRef Prot, raw_ostream &OS, + StringRef ExtSymbolDefinedIn = ""); + +/// Generate USR fragment for a global (non-nested) enum. +void generateUSRForGlobalEnum(StringRef EnumName, raw_ostream &OS, + StringRef ExtSymbolDefinedIn = ""); + +/// Generate a USR fragment for an enum constant. +void generateUSRForEnumConstant(StringRef EnumConstantName, raw_ostream &OS); /// \brief Generate a USR for a macro, including the USR prefix. /// diff --git a/include/clang/Sema/Sema.h b/include/clang/Sema/Sema.h index bd68842..0f16cb9 100644 --- a/include/clang/Sema/Sema.h +++ b/include/clang/Sema/Sema.h @@ -1934,7 +1934,8 @@ public: /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. - DeclGroupPtrTy ActOnModuleDecl(SourceLocation ModuleLoc, ModuleDeclKind MDK, + DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, + SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path); /// \brief The parser has processed a module import declaration. @@ -8326,6 +8327,12 @@ private: /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; + /// Push new OpenMP function region for non-capturing function. + void pushOpenMPFunctionRegion(); + + /// Pop OpenMP function region for non-capturing function. + void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); + /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. 
diff --git a/lib/AST/Comment.cpp b/lib/AST/Comment.cpp index 7a7d3dd..dfa2a16 100644 --- a/lib/AST/Comment.cpp +++ b/lib/AST/Comment.cpp @@ -116,6 +116,9 @@ bool ParagraphComment::isWhitespaceNoCache() const { static TypeLoc lookThroughTypedefOrTypeAliasLocs(TypeLoc &SrcTL) { TypeLoc TL = SrcTL.IgnoreParens(); + // Look through attribute types. + if (AttributedTypeLoc AttributeTL = TL.getAs()) + return AttributeTL.getModifiedLoc(); // Look through qualified types. if (QualifiedTypeLoc QualifiedTL = TL.getAs()) return QualifiedTL.getUnqualifiedLoc(); @@ -280,8 +283,25 @@ void DeclInfo::fill() { case Decl::EnumConstant: case Decl::ObjCIvar: case Decl::ObjCAtDefsField: + case Decl::ObjCProperty: { + const TypeSourceInfo *TSI; + if (const auto *VD = dyn_cast(CommentDecl)) + TSI = VD->getTypeSourceInfo(); + else if (const auto *PD = dyn_cast(CommentDecl)) + TSI = PD->getTypeSourceInfo(); + else + TSI = nullptr; + if (TSI) { + TypeLoc TL = TSI->getTypeLoc().getUnqualifiedLoc(); + FunctionTypeLoc FTL; + if (getFunctionTypeLoc(TL, FTL)) { + ParamVars = FTL.getParams(); + ReturnType = FTL.getReturnLoc().getType(); + } + } Kind = VariableKind; break; + } case Decl::Namespace: Kind = NamespaceKind; break; diff --git a/lib/AST/CommentSema.cpp b/lib/AST/CommentSema.cpp index d39a9b2..403454d 100644 --- a/lib/AST/CommentSema.cpp +++ b/lib/AST/CommentSema.cpp @@ -86,7 +86,7 @@ ParamCommandComment *Sema::actOnParamCommandStart( new (Allocator) ParamCommandComment(LocBegin, LocEnd, CommandID, CommandMarker); - if (!isFunctionDecl()) + if (!isFunctionDecl() && !isFunctionOrBlockPointerVarLikeDecl()) Diag(Command->getLocation(), diag::warn_doc_param_not_attached_to_a_function_decl) << CommandMarker @@ -584,7 +584,11 @@ void Sema::checkReturnsCommand(const BlockCommandComment *Command) { assert(ThisDeclInfo && "should not call this check on a bare comment"); - if (isFunctionDecl()) { + // We allow the return command for all @properties because it can be used + // to document the value 
that the property getter returns. + if (isObjCPropertyDecl()) + return; + if (isFunctionDecl() || isFunctionOrBlockPointerVarLikeDecl()) { if (ThisDeclInfo->ReturnType->isVoidType()) { unsigned DiagKind; switch (ThisDeclInfo->CommentDecl->getKind()) { @@ -610,8 +614,6 @@ void Sema::checkReturnsCommand(const BlockCommandComment *Command) { } return; } - else if (isObjCPropertyDecl()) - return; Diag(Command->getLocation(), diag::warn_doc_returns_not_attached_to_a_function_decl) @@ -844,6 +846,30 @@ bool Sema::isFunctionPointerVarDecl() { return false; } +bool Sema::isFunctionOrBlockPointerVarLikeDecl() { + if (!ThisDeclInfo) + return false; + if (!ThisDeclInfo->IsFilled) + inspectThisDecl(); + if (ThisDeclInfo->getKind() != DeclInfo::VariableKind || + !ThisDeclInfo->CurrentDecl) + return false; + QualType QT; + if (const auto *VD = dyn_cast(ThisDeclInfo->CurrentDecl)) + QT = VD->getType(); + else if (const auto *PD = + dyn_cast(ThisDeclInfo->CurrentDecl)) + QT = PD->getType(); + else + return false; + // We would like to warn about the 'returns'/'param' commands for + // variables that don't directly specify the function type, so type aliases + // can be ignored. + if (QT->getAs()) + return false; + return QT->isFunctionPointerType() || QT->isBlockPointerType(); +} + bool Sema::isObjCPropertyDecl() { if (!ThisDeclInfo) return false; diff --git a/lib/AST/Decl.cpp b/lib/AST/Decl.cpp index 094e8dc..0f2558e 100644 --- a/lib/AST/Decl.cpp +++ b/lib/AST/Decl.cpp @@ -2251,6 +2251,14 @@ bool VarDecl::checkInitIsICE() const { return Eval->IsICE; } +template +static DeclT *getDefinitionOrSelf(DeclT *D) { + assert(D); + if (auto *Def = D->getDefinition()) + return Def; + return D; +} + VarDecl *VarDecl::getTemplateInstantiationPattern() const { // If it's a variable template specialization, find the template or partial // specialization from which it was instantiated. 
@@ -2262,7 +2270,7 @@ VarDecl *VarDecl::getTemplateInstantiationPattern() const { break; VTD = NewVTD; } - return VTD->getTemplatedDecl()->getDefinition(); + return getDefinitionOrSelf(VTD->getTemplatedDecl()); } if (auto *VTPSD = From.dyn_cast()) { @@ -2271,7 +2279,7 @@ VarDecl *VarDecl::getTemplateInstantiationPattern() const { break; VTPSD = NewVTPSD; } - return VTPSD->getDefinition(); + return getDefinitionOrSelf(VTPSD); } } @@ -2280,23 +2288,18 @@ VarDecl *VarDecl::getTemplateInstantiationPattern() const { VarDecl *VD = getInstantiatedFromStaticDataMember(); while (auto *NewVD = VD->getInstantiatedFromStaticDataMember()) VD = NewVD; - return VD->getDefinition(); + return getDefinitionOrSelf(VD); } } if (VarTemplateDecl *VarTemplate = getDescribedVarTemplate()) { - while (VarTemplate->getInstantiatedFromMemberTemplate()) { if (VarTemplate->isMemberSpecialization()) break; VarTemplate = VarTemplate->getInstantiatedFromMemberTemplate(); } - assert((!VarTemplate->getTemplatedDecl() || - !isTemplateInstantiation(getTemplateSpecializationKind())) && - "couldn't find pattern for variable instantiation"); - - return VarTemplate->getTemplatedDecl(); + return getDefinitionOrSelf(VarTemplate->getTemplatedDecl()); } return nullptr; } @@ -3198,9 +3201,12 @@ bool FunctionDecl::isTemplateInstantiation() const { FunctionDecl *FunctionDecl::getTemplateInstantiationPattern() const { // Handle class scope explicit specialization special case. 
- if (getTemplateSpecializationKind() == TSK_ExplicitSpecialization) - return getClassScopeSpecializationPattern(); - + if (getTemplateSpecializationKind() == TSK_ExplicitSpecialization) { + if (auto *Spec = getClassScopeSpecializationPattern()) + return getDefinitionOrSelf(Spec); + return nullptr; + } + // If this is a generic lambda call operator specialization, its // instantiation pattern is always its primary template's pattern // even if its primary template was instantiated from another @@ -3212,16 +3218,10 @@ FunctionDecl *FunctionDecl::getTemplateInstantiationPattern() const { if (isGenericLambdaCallOperatorSpecialization( dyn_cast(this))) { - assert(getPrimaryTemplate() && "A generic lambda specialization must be " - "generated from a primary call operator " - "template"); - assert(getPrimaryTemplate()->getTemplatedDecl()->getBody() && - "A generic lambda call operator template must always have a body - " - "even if instantiated from a prototype (i.e. as written) member " - "template"); - return getPrimaryTemplate()->getTemplatedDecl(); + assert(getPrimaryTemplate() && "not a generic lambda call operator?"); + return getDefinitionOrSelf(getPrimaryTemplate()->getTemplatedDecl()); } - + if (FunctionTemplateDecl *Primary = getPrimaryTemplate()) { while (Primary->getInstantiatedFromMemberTemplate()) { // If we have hit a point where the user provided a specialization of @@ -3230,11 +3230,14 @@ FunctionDecl *FunctionDecl::getTemplateInstantiationPattern() const { break; Primary = Primary->getInstantiatedFromMemberTemplate(); } - - return Primary->getTemplatedDecl(); + + return getDefinitionOrSelf(Primary->getTemplatedDecl()); } - - return getInstantiatedFromMemberFunction(); + + if (auto *MFD = getInstantiatedFromMemberFunction()) + return getDefinitionOrSelf(MFD); + + return nullptr; } FunctionTemplateDecl *FunctionDecl::getPrimaryTemplate() const { @@ -3778,7 +3781,7 @@ EnumDecl *EnumDecl::getTemplateInstantiationPattern() const { EnumDecl *ED = 
getInstantiatedFromMemberEnum(); while (auto *NewED = ED->getInstantiatedFromMemberEnum()) ED = NewED; - return ED; + return getDefinitionOrSelf(ED); } } diff --git a/lib/AST/DeclCXX.cpp b/lib/AST/DeclCXX.cpp index 2e5cec9..dd8f768 100644 --- a/lib/AST/DeclCXX.cpp +++ b/lib/AST/DeclCXX.cpp @@ -1364,6 +1364,13 @@ CXXRecordDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK) { } const CXXRecordDecl *CXXRecordDecl::getTemplateInstantiationPattern() const { + auto GetDefinitionOrSelf = + [](const CXXRecordDecl *D) -> const CXXRecordDecl * { + if (auto *Def = D->getDefinition()) + return Def; + return D; + }; + // If it's a class template specialization, find the template or partial // specialization from which it was instantiated. if (auto *TD = dyn_cast(this)) { @@ -1374,7 +1381,7 @@ const CXXRecordDecl *CXXRecordDecl::getTemplateInstantiationPattern() const { break; CTD = NewCTD; } - return CTD->getTemplatedDecl()->getDefinition(); + return GetDefinitionOrSelf(CTD->getTemplatedDecl()); } if (auto *CTPSD = From.dyn_cast()) { @@ -1383,7 +1390,7 @@ const CXXRecordDecl *CXXRecordDecl::getTemplateInstantiationPattern() const { break; CTPSD = NewCTPSD; } - return CTPSD->getDefinition(); + return GetDefinitionOrSelf(CTPSD); } } @@ -1392,7 +1399,7 @@ const CXXRecordDecl *CXXRecordDecl::getTemplateInstantiationPattern() const { const CXXRecordDecl *RD = this; while (auto *NewRD = RD->getInstantiatedFromMemberClass()) RD = NewRD; - return RD->getDefinition(); + return GetDefinitionOrSelf(RD); } } diff --git a/lib/AST/DeclObjC.cpp b/lib/AST/DeclObjC.cpp index 9218eb5..a12a380 100644 --- a/lib/AST/DeclObjC.cpp +++ b/lib/AST/DeclObjC.cpp @@ -539,9 +539,18 @@ void ObjCInterfaceDecl::getDesignatedInitializers( bool ObjCInterfaceDecl::isDesignatedInitializer(Selector Sel, const ObjCMethodDecl **InitMethod) const { + bool HasCompleteDef = isThisDeclarationADefinition(); + // During deserialization the data record for the ObjCInterfaceDecl could + // be made invariant 
by reusing the canonical decl. Take this into account + // when checking for the complete definition. + if (!HasCompleteDef && getCanonicalDecl()->hasDefinition() && + getCanonicalDecl()->getDefinition() == getDefinition()) + HasCompleteDef = true; + // Check for a complete definition and recover if not so. - if (!isThisDeclarationADefinition()) + if (!HasCompleteDef) return false; + if (data().ExternallyCompleted) LoadExternalDefinition(); diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp index 20059d9..85788b4 100644 --- a/lib/CodeGen/BackendUtil.cpp +++ b/lib/CodeGen/BackendUtil.cpp @@ -129,16 +129,20 @@ public: // that we add to the PassManagerBuilder. class PassManagerBuilderWrapper : public PassManagerBuilder { public: - PassManagerBuilderWrapper(const CodeGenOptions &CGOpts, + PassManagerBuilderWrapper(const Triple &TargetTriple, + const CodeGenOptions &CGOpts, const LangOptions &LangOpts) - : PassManagerBuilder(), CGOpts(CGOpts), LangOpts(LangOpts) {} + : PassManagerBuilder(), TargetTriple(TargetTriple), CGOpts(CGOpts), + LangOpts(LangOpts) {} + const Triple &getTargetTriple() const { return TargetTriple; } const CodeGenOptions &getCGOpts() const { return CGOpts; } const LangOptions &getLangOpts() const { return LangOpts; } + private: + const Triple &TargetTriple; const CodeGenOptions &CGOpts; const LangOptions &LangOpts; }; - } static void addObjCARCAPElimPass(const PassManagerBuilder &Builder, PassManagerBase &PM) { @@ -185,16 +189,35 @@ static void addSanitizerCoveragePass(const PassManagerBuilder &Builder, PM.add(createSanitizerCoverageModulePass(Opts)); } +// Check if ASan should use GC-friendly instrumentation for globals. +// First of all, there is no point if -fdata-sections is off (expect for MachO, +// where this is not a factor). Also, on ELF this feature requires an assembler +// extension that only works with -integrated-as at the moment. 
+static bool asanUseGlobalsGC(const Triple &T, const CodeGenOptions &CGOpts) { + switch (T.getObjectFormat()) { + case Triple::MachO: + case Triple::COFF: + return true; + case Triple::ELF: + return CGOpts.DataSections && !CGOpts.DisableIntegratedAS; + default: + return false; + } +} + static void addAddressSanitizerPasses(const PassManagerBuilder &Builder, legacy::PassManagerBase &PM) { const PassManagerBuilderWrapper &BuilderWrapper = static_cast(Builder); + const Triple &T = BuilderWrapper.getTargetTriple(); const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts(); bool Recover = CGOpts.SanitizeRecover.has(SanitizerKind::Address); bool UseAfterScope = CGOpts.SanitizeAddressUseAfterScope; + bool UseGlobalsGC = asanUseGlobalsGC(T, CGOpts); PM.add(createAddressSanitizerFunctionPass(/*CompileKernel*/ false, Recover, UseAfterScope)); - PM.add(createAddressSanitizerModulePass(/*CompileKernel*/false, Recover)); + PM.add(createAddressSanitizerModulePass(/*CompileKernel*/ false, Recover, + UseGlobalsGC)); } static void addKernelAddressSanitizerPasses(const PassManagerBuilder &Builder, @@ -407,6 +430,8 @@ static void initTargetOptions(llvm::TargetOptions &Options, Options.EmulatedTLS = CodeGenOpts.EmulatedTLS; Options.DebuggerTuning = CodeGenOpts.getDebuggerTuning(); + if (CodeGenOpts.EnableSplitDwarf) + Options.MCOptions.SplitDwarfFile = CodeGenOpts.SplitDwarfFile; Options.MCOptions.MCRelaxAll = CodeGenOpts.RelaxAll; Options.MCOptions.MCSaveTempLabels = CodeGenOpts.SaveTempLabels; Options.MCOptions.MCUseDwarfDirectory = !CodeGenOpts.NoDwarfDirectoryAsm; @@ -434,8 +459,6 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM, if (CodeGenOpts.DisableLLVMPasses) return; - PassManagerBuilderWrapper PMBuilder(CodeGenOpts, LangOpts); - // Figure out TargetLibraryInfo. This needs to be added to MPM and FPM // manually (and not via PMBuilder), since some passes (eg. 
InstrProfiling) // are inserted before PMBuilder ones - they'd get the default-constructed @@ -444,6 +467,8 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM, std::unique_ptr TLII( createTLII(TargetTriple, CodeGenOpts)); + PassManagerBuilderWrapper PMBuilder(TargetTriple, CodeGenOpts, LangOpts); + // At O0 and O1 we only run the always inliner which is more efficient. At // higher optimization levels we run the normal inliner. if (CodeGenOpts.OptimizationLevel <= 1) { diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp index 2f6a2b9..dd32a44 100644 --- a/lib/CodeGen/CGDebugInfo.cpp +++ b/lib/CodeGen/CGDebugInfo.cpp @@ -528,12 +528,14 @@ void CGDebugInfo::CreateCompileUnit() { // Create new compile unit. // FIXME - Eliminate TheCU. TheCU = DBuilder.createCompileUnit( - LangTag, DBuilder.createFile(remapDIPath(MainFileName), - remapDIPath(getCurrentDirname()), CSKind, - Checksum), + LangTag, + DBuilder.createFile(remapDIPath(MainFileName), + remapDIPath(getCurrentDirname()), CSKind, Checksum), Producer, LO.Optimize, CGM.getCodeGenOpts().DwarfDebugFlags, RuntimeVers, - CGM.getCodeGenOpts().SplitDwarfFile, EmissionKind, 0 /* DWOid */, - CGM.getCodeGenOpts().SplitDwarfInlining, + CGM.getCodeGenOpts().EnableSplitDwarf + ? "" + : CGM.getCodeGenOpts().SplitDwarfFile, + EmissionKind, 0 /* DWOid */, CGM.getCodeGenOpts().SplitDwarfInlining, CGM.getCodeGenOpts().DebugInfoForProfiling); } diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp index 719147a..d0aacf6 100644 --- a/lib/CodeGen/CGExpr.cpp +++ b/lib/CodeGen/CGExpr.cpp @@ -533,15 +533,6 @@ bool CodeGenFunction::sanitizePerformTypeCheck() const { SanOpts.has(SanitizerKind::Vptr); } -/// Check if a runtime null check for \p Ptr can be omitted. -static bool canOmitPointerNullCheck(llvm::Value *Ptr) { - // Note: do not perform any constant-folding in this function. That is best - // left to the IR builder. - - // Pointers to alloca'd memory are non-null. 
- return isa(Ptr->stripPointerCastsNoFollowAliases()); -} - void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *Ptr, QualType Ty, CharUnits Alignment, @@ -560,11 +551,16 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, SmallVector, 3> Checks; llvm::BasicBlock *Done = nullptr; + // Quickly determine whether we have a pointer to an alloca. It's possible + // to skip null checks, and some alignment checks, for these pointers. This + // can reduce compile-time significantly. + auto PtrToAlloca = + dyn_cast(Ptr->stripPointerCastsNoFollowAliases()); + bool AllowNullPointers = TCK == TCK_DowncastPointer || TCK == TCK_Upcast || TCK == TCK_UpcastToVirtualBase; if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) && - !SkippedChecks.has(SanitizerKind::Null) && - !canOmitPointerNullCheck(Ptr)) { + !SkippedChecks.has(SanitizerKind::Null) && !PtrToAlloca) { // The glvalue must not be an empty glvalue. llvm::Value *IsNonNull = Builder.CreateIsNotNull(Ptr); @@ -617,7 +613,8 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity(); // The glvalue must be suitably aligned. 
- if (AlignVal > 1) { + if (AlignVal > 1 && + (!PtrToAlloca || PtrToAlloca->getAlignment() < AlignVal)) { llvm::Value *Align = Builder.CreateAnd(Builder.CreatePtrToInt(Ptr, IntPtrTy), llvm::ConstantInt::get(IntPtrTy, AlignVal - 1)); diff --git a/lib/CodeGen/CGOpenMPRuntime.cpp b/lib/CodeGen/CGOpenMPRuntime.cpp index 874b6a6..d1a706b 100644 --- a/lib/CodeGen/CGOpenMPRuntime.cpp +++ b/lib/CodeGen/CGOpenMPRuntime.cpp @@ -2466,16 +2466,14 @@ static int addMonoNonMonoModifier(OpenMPSchedType Schedule, return Schedule | Modifier; } -void CGOpenMPRuntime::emitForDispatchInit(CodeGenFunction &CGF, - SourceLocation Loc, - const OpenMPScheduleTy &ScheduleKind, - unsigned IVSize, bool IVSigned, - bool Ordered, llvm::Value *UB, - llvm::Value *Chunk) { +void CGOpenMPRuntime::emitForDispatchInit( + CodeGenFunction &CGF, SourceLocation Loc, + const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, + bool Ordered, const DispatchRTInput &DispatchValues) { if (!CGF.HaveInsertPoint()) return; - OpenMPSchedType Schedule = - getRuntimeSchedule(ScheduleKind.Schedule, Chunk != nullptr, Ordered); + OpenMPSchedType Schedule = getRuntimeSchedule( + ScheduleKind.Schedule, DispatchValues.Chunk != nullptr, Ordered); assert(Ordered || (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked && Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked && @@ -2486,14 +2484,14 @@ void CGOpenMPRuntime::emitForDispatchInit(CodeGenFunction &CGF, // kmp_int[32|64] stride, kmp_int[32|64] chunk); // If the Chunk was not specified in the clause - use default value 1. - if (Chunk == nullptr) - Chunk = CGF.Builder.getIntN(IVSize, 1); + llvm::Value *Chunk = DispatchValues.Chunk ? 
DispatchValues.Chunk + : CGF.Builder.getIntN(IVSize, 1); llvm::Value *Args[] = { emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc), CGF.Builder.getInt32(addMonoNonMonoModifier( Schedule, ScheduleKind.M1, ScheduleKind.M2)), // Schedule type - CGF.Builder.getIntN(IVSize, 0), // Lower - UB, // Upper + DispatchValues.LB, // Lower + DispatchValues.UB, // Upper CGF.Builder.getIntN(IVSize, 1), // Stride Chunk // Chunk }; diff --git a/lib/CodeGen/CGOpenMPRuntime.h b/lib/CodeGen/CGOpenMPRuntime.h index 7901a6b..6f460f1 100644 --- a/lib/CodeGen/CGOpenMPRuntime.h +++ b/lib/CodeGen/CGOpenMPRuntime.h @@ -672,16 +672,50 @@ public: /// virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const; + /// struct with the values to be passed to the dispatch runtime function + struct DispatchRTInput { + /// Loop lower bound + llvm::Value *LB = nullptr; + /// Loop upper bound + llvm::Value *UB = nullptr; + /// Chunk size specified using 'schedule' clause (nullptr if chunk + /// was not specified) + llvm::Value *Chunk = nullptr; + DispatchRTInput() = default; + DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk) + : LB(LB), UB(UB), Chunk(Chunk) {} + }; + + /// Call the appropriate runtime routine to initialize it before start + /// of loop. + + /// This is used for non static scheduled types and when the ordered + /// clause is present on the loop construct. + /// Depending on the loop schedule, it is necessary to call some runtime + /// routine before start of the OpenMP loop to get the loop upper / lower + /// bounds \a LB and \a UB and stride \a ST. + /// + /// \param CGF Reference to current CodeGenFunction. + /// \param Loc Clang source location. + /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. + /// \param IVSize Size of the iteration variable in bits. + /// \param IVSigned Sign of the interation variable. + /// \param Ordered true if loop is ordered, false otherwise. 
+ /// \param DispatchValues struct containing llvm values for lower bound, upper + /// bound, and chunk expression. + /// For the default (nullptr) value, the chunk 1 will be used. + /// virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, - llvm::Value *UB, - llvm::Value *Chunk = nullptr); + const DispatchRTInput &DispatchValues); /// \brief Call the appropriate runtime routine to initialize it before start /// of loop. /// - /// Depending on the loop schedule, it is nesessary to call some runtime + /// This is used only in case of static schedule, when the user did not + /// specify a ordered clause on the loop construct. + /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds \a LB and \a UB and stride \a ST. /// diff --git a/lib/CodeGen/CGStmtOpenMP.cpp b/lib/CodeGen/CGStmtOpenMP.cpp index 22269e4..f738dd0 100644 --- a/lib/CodeGen/CGStmtOpenMP.cpp +++ b/lib/CodeGen/CGStmtOpenMP.cpp @@ -87,7 +87,8 @@ public: class OMPParallelScope final : public OMPLexicalScope { bool EmitPreInitStmt(const OMPExecutableDirective &S) { OpenMPDirectiveKind Kind = S.getDirectiveKind(); - return !isOpenMPTargetExecutionDirective(Kind) && + return !(isOpenMPTargetExecutionDirective(Kind) || + isOpenMPLoopBoundSharingDirective(Kind)) && isOpenMPParallelDirective(Kind); } @@ -1249,10 +1250,20 @@ static void emitPostUpdateForReductionClause( CGF.EmitBlock(DoneBB, /*IsFinished=*/true); } -static void emitCommonOMPParallelDirective(CodeGenFunction &CGF, - const OMPExecutableDirective &S, - OpenMPDirectiveKind InnermostKind, - const RegionCodeGenTy &CodeGen) { +namespace { +/// Codegen lambda for appending distribute lower and upper bounds to outlined +/// parallel function. 
This is necessary for combined constructs such as +/// 'distribute parallel for' +typedef llvm::function_ref &)> + CodeGenBoundParametersTy; +} // anonymous namespace + +static void emitCommonOMPParallelDirective( + CodeGenFunction &CGF, const OMPExecutableDirective &S, + OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, + const CodeGenBoundParametersTy &CodeGenBoundParameters) { const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel); auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction( S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen); @@ -1279,11 +1290,20 @@ static void emitCommonOMPParallelDirective(CodeGenFunction &CGF, OMPParallelScope Scope(CGF, S); llvm::SmallVector CapturedVars; + // Combining 'distribute' with 'for' requires sharing each 'distribute' chunk + // lower and upper bounds with the pragma 'for' chunking mechanism. + // The following lambda takes care of appending the lower and upper bound + // parameters when necessary + CodeGenBoundParameters(CGF, S, CapturedVars); CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn, CapturedVars, IfCond); } +static void emitEmptyBoundParameters(CodeGenFunction &, + const OMPExecutableDirective &, + llvm::SmallVectorImpl &) {} + void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) { // Emit parallel region as a standalone region. 
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { @@ -1304,7 +1324,8 @@ void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) { CGF.EmitStmt(cast(S.getAssociatedStmt())->getCapturedStmt()); CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); }; - emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen); + emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen, + emitEmptyBoundParameters); emitPostUpdateForReductionClause( *this, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; }); } @@ -1649,6 +1670,13 @@ void CodeGenFunction::EmitOMPSimdFinal( EmitBlock(DoneBB, /*IsFinished=*/true); } +static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF, + const OMPLoopDirective &S, + CodeGenFunction::JumpDest LoopExit) { + CGF.EmitOMPLoopBody(S, LoopExit); + CGF.EmitStopPoint(&S); +}; + void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) { auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { OMPLoopScope PreInitScope(CGF, S); @@ -1731,9 +1759,12 @@ void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) { CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); } -void CodeGenFunction::EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic, - const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered, - Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) { +void CodeGenFunction::EmitOMPOuterLoop( + bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S, + CodeGenFunction::OMPPrivateScope &LoopScope, + const CodeGenFunction::OMPLoopArguments &LoopArgs, + const CodeGenFunction::CodeGenLoopTy &CodeGenLoop, + const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) { auto &RT = CGM.getOpenMPRuntime(); const Expr *IVExpr = S.getIterationVariable(); @@ -1751,15 +1782,18 @@ void CodeGenFunction::EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic, llvm::Value *BoolCondVal = nullptr; if 
(!DynamicOrOrdered) { - // UB = min(UB, GlobalUB) - EmitIgnoredExpr(S.getEnsureUpperBound()); + // UB = min(UB, GlobalUB) or + // UB = min(UB, PrevUB) for combined loop sharing constructs (e.g. + // 'distribute parallel for') + EmitIgnoredExpr(LoopArgs.EUB); // IV = LB - EmitIgnoredExpr(S.getInit()); + EmitIgnoredExpr(LoopArgs.Init); // IV < UB - BoolCondVal = EvaluateExprAsBool(S.getCond()); + BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond); } else { - BoolCondVal = RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned, IL, - LB, UB, ST); + BoolCondVal = + RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned, LoopArgs.IL, + LoopArgs.LB, LoopArgs.UB, LoopArgs.ST); } // If there are any cleanups between here and the loop-exit scope, @@ -1779,7 +1813,7 @@ void CodeGenFunction::EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic, // Emit "IV = LB" (in case of static schedule, we have already calculated new // LB for loop condition and emitted it above). if (DynamicOrOrdered) - EmitIgnoredExpr(S.getInit()); + EmitIgnoredExpr(LoopArgs.Init); // Create a block for the increment. auto Continue = getJumpDestInCurrentScope("omp.dispatch.inc"); @@ -1793,24 +1827,27 @@ void CodeGenFunction::EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic, EmitOMPSimdInit(S, IsMonotonic); SourceLocation Loc = S.getLocStart(); - EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(), - [&S, LoopExit](CodeGenFunction &CGF) { - CGF.EmitOMPLoopBody(S, LoopExit); - CGF.EmitStopPoint(&S); - }, - [Ordered, IVSize, IVSigned, Loc](CodeGenFunction &CGF) { - if (Ordered) { - CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd( - CGF, Loc, IVSize, IVSigned); - } - }); + + // when 'distribute' is not combined with a 'for': + // while (idx <= UB) { BODY; ++idx; } + // when 'distribute' is combined with a 'for' + // (e.g. 
'distribute parallel for') + // while (idx <= UB) { ; idx += ST; } + EmitOMPInnerLoop( + S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr, + [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) { + CodeGenLoop(CGF, S, LoopExit); + }, + [IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) { + CodeGenOrdered(CGF, Loc, IVSize, IVSigned); + }); EmitBlock(Continue.getBlock()); BreakContinueStack.pop_back(); if (!DynamicOrOrdered) { // Emit "LB = LB + Stride", "UB = UB + Stride". - EmitIgnoredExpr(S.getNextLowerBound()); - EmitIgnoredExpr(S.getNextUpperBound()); + EmitIgnoredExpr(LoopArgs.NextLB); + EmitIgnoredExpr(LoopArgs.NextUB); } EmitBranch(CondBlock); @@ -1829,7 +1866,8 @@ void CodeGenFunction::EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic, void CodeGenFunction::EmitOMPForOuterLoop( const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic, const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered, - Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) { + const OMPLoopArguments &LoopArgs, + const CodeGenDispatchBoundsTy &CGDispatchBounds) { auto &RT = CGM.getOpenMPRuntime(); // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime). @@ -1838,7 +1876,7 @@ void CodeGenFunction::EmitOMPForOuterLoop( assert((Ordered || !RT.isStaticNonchunked(ScheduleKind.Schedule, - /*Chunked=*/Chunk != nullptr)) && + LoopArgs.Chunk != nullptr)) && "static non-chunked schedule does not need outer loop"); // Emit outer loop. 
@@ -1896,22 +1934,46 @@ void CodeGenFunction::EmitOMPForOuterLoop( const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); if (DynamicOrOrdered) { - llvm::Value *UBVal = EmitScalarExpr(S.getLastIteration()); + auto DispatchBounds = CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB); + llvm::Value *LBVal = DispatchBounds.first; + llvm::Value *UBVal = DispatchBounds.second; + CGOpenMPRuntime::DispatchRTInput DipatchRTInputValues = {LBVal, UBVal, + LoopArgs.Chunk}; RT.emitForDispatchInit(*this, S.getLocStart(), ScheduleKind, IVSize, - IVSigned, Ordered, UBVal, Chunk); + IVSigned, Ordered, DipatchRTInputValues); } else { RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind, IVSize, IVSigned, - Ordered, IL, LB, UB, ST, Chunk); + Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB, + LoopArgs.ST, LoopArgs.Chunk); } - EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, Ordered, LB, UB, - ST, IL, Chunk); + auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc, + const unsigned IVSize, + const bool IVSigned) { + if (Ordered) { + CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize, + IVSigned); + } + }; + + OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST, + LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB); + OuterLoopArgs.IncExpr = S.getInc(); + OuterLoopArgs.Init = S.getInit(); + OuterLoopArgs.Cond = S.getCond(); + OuterLoopArgs.NextLB = S.getNextLowerBound(); + OuterLoopArgs.NextUB = S.getNextUpperBound(); + EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs, + emitOMPLoopBodyWithStopPoint, CodeGenOrdered); } +static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc, + const unsigned IVSize, const bool IVSigned) {} + void CodeGenFunction::EmitOMPDistributeOuterLoop( - OpenMPDistScheduleClauseKind ScheduleKind, - const OMPDistributeDirective &S, OMPPrivateScope &LoopScope, - Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) { + 
OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S, + OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs, + const CodeGenLoopTy &CodeGenLoopContent) { auto &RT = CGM.getOpenMPRuntime(); @@ -1924,26 +1986,159 @@ void CodeGenFunction::EmitOMPDistributeOuterLoop( const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); - RT.emitDistributeStaticInit(*this, S.getLocStart(), ScheduleKind, - IVSize, IVSigned, /* Ordered = */ false, - IL, LB, UB, ST, Chunk); + RT.emitDistributeStaticInit(*this, S.getLocStart(), ScheduleKind, IVSize, + IVSigned, /* Ordered = */ false, LoopArgs.IL, + LoopArgs.LB, LoopArgs.UB, LoopArgs.ST, + LoopArgs.Chunk); - EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, - S, LoopScope, /* Ordered = */ false, LB, UB, ST, IL, Chunk); + // for combined 'distribute' and 'for' the increment expression of distribute + // is store in DistInc. For 'distribute' alone, it is in Inc. + Expr *IncExpr; + if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())) + IncExpr = S.getDistInc(); + else + IncExpr = S.getInc(); + + // this routine is shared by 'omp distribute parallel for' and + // 'omp distribute': select the right EUB expression depending on the + // directive + OMPLoopArguments OuterLoopArgs; + OuterLoopArgs.LB = LoopArgs.LB; + OuterLoopArgs.UB = LoopArgs.UB; + OuterLoopArgs.ST = LoopArgs.ST; + OuterLoopArgs.IL = LoopArgs.IL; + OuterLoopArgs.Chunk = LoopArgs.Chunk; + OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) + ? S.getCombinedEnsureUpperBound() + : S.getEnsureUpperBound(); + OuterLoopArgs.IncExpr = IncExpr; + OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) + ? S.getCombinedInit() + : S.getInit(); + OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) + ? 
S.getCombinedCond() + : S.getCond(); + OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) + ? S.getCombinedNextLowerBound() + : S.getNextLowerBound(); + OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) + ? S.getCombinedNextUpperBound() + : S.getNextUpperBound(); + + EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S, + LoopScope, OuterLoopArgs, CodeGenLoopContent, + emitEmptyOrdered); +} + +/// Emit a helper variable and return corresponding lvalue. +static LValue EmitOMPHelperVar(CodeGenFunction &CGF, + const DeclRefExpr *Helper) { + auto VDecl = cast(Helper->getDecl()); + CGF.EmitVarDecl(*VDecl); + return CGF.EmitLValue(Helper); +} + +static std::pair +emitDistributeParallelForInnerBounds(CodeGenFunction &CGF, + const OMPExecutableDirective &S) { + const OMPLoopDirective &LS = cast(S); + LValue LB = + EmitOMPHelperVar(CGF, cast(LS.getLowerBoundVariable())); + LValue UB = + EmitOMPHelperVar(CGF, cast(LS.getUpperBoundVariable())); + + // When composing 'distribute' with 'for' (e.g. as in 'distribute + // parallel for') we need to use the 'distribute' + // chunk lower and upper bounds rather than the whole loop iteration + // space. These are parameters to the outlined function for 'parallel' + // and we copy the bounds of the previous schedule into the + // the current ones. 
+ LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable()); + LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable()); + llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar(PrevLB, SourceLocation()); + PrevLBVal = CGF.EmitScalarConversion( + PrevLBVal, LS.getPrevLowerBoundVariable()->getType(), + LS.getIterationVariable()->getType(), SourceLocation()); + llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar(PrevUB, SourceLocation()); + PrevUBVal = CGF.EmitScalarConversion( + PrevUBVal, LS.getPrevUpperBoundVariable()->getType(), + LS.getIterationVariable()->getType(), SourceLocation()); + + CGF.EmitStoreOfScalar(PrevLBVal, LB); + CGF.EmitStoreOfScalar(PrevUBVal, UB); + + return {LB, UB}; +} + +/// if the 'for' loop has a dispatch schedule (e.g. dynamic, guided) then +/// we need to use the LB and UB expressions generated by the worksharing +/// code generation support, whereas in non combined situations we would +/// just emit 0 and the LastIteration expression +/// This function is necessary due to the difference of the LB and UB +/// types for the RT emission routines for 'for_static_init' and +/// 'for_dispatch_init' +static std::pair +emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF, + const OMPExecutableDirective &S, + Address LB, Address UB) { + const OMPLoopDirective &LS = cast(S); + const Expr *IVExpr = LS.getIterationVariable(); + // when implementing a dynamic schedule for a 'for' combined with a + // 'distribute' (e.g. 
'distribute parallel for'), the 'for' loop + // is not normalized as each team only executes its own assigned + // distribute chunk + QualType IteratorTy = IVExpr->getType(); + llvm::Value *LBVal = CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, + SourceLocation()); + llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, + SourceLocation()); + return {LBVal, UBVal}; +}; + +static void emitDistributeParallelForDistributeInnerBoundParams( + CodeGenFunction &CGF, const OMPExecutableDirective &S, + llvm::SmallVectorImpl &CapturedVars) { + const auto &Dir = cast(S); + LValue LB = + CGF.EmitLValue(cast(Dir.getCombinedLowerBoundVariable())); + auto LBCast = CGF.Builder.CreateIntCast( + CGF.Builder.CreateLoad(LB.getAddress()), CGF.SizeTy, /*isSigned=*/false); + CapturedVars.push_back(LBCast); + LValue UB = + CGF.EmitLValue(cast(Dir.getCombinedUpperBoundVariable())); + + auto UBCast = CGF.Builder.CreateIntCast( + CGF.Builder.CreateLoad(UB.getAddress()), CGF.SizeTy, /*isSigned=*/false); + CapturedVars.push_back(UBCast); +}; + +static void +emitInnerParallelForWhenCombined(CodeGenFunction &CGF, + const OMPLoopDirective &S, + CodeGenFunction::JumpDest LoopExit) { + auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF, + PrePostActionTy &) { + CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(), + emitDistributeParallelForInnerBounds, + emitDistributeParallelForDispatchBounds); + }; + + emitCommonOMPParallelDirective( + CGF, S, OMPD_for, CGInlinedWorksharingLoop, + emitDistributeParallelForDistributeInnerBoundParams); } void CodeGenFunction::EmitOMPDistributeParallelForDirective( const OMPDistributeParallelForDirective &S) { + auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { + CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, + S.getDistInc()); + }; OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); - CGM.getOpenMPRuntime().emitInlinedDirective( - *this, OMPD_distribute_parallel_for, - 
[&S](CodeGenFunction &CGF, PrePostActionTy &) { - OMPLoopScope PreInitScope(CGF, S); - OMPCancelStackRAII CancelRegion(CGF, OMPD_distribute_parallel_for, - /*HasCancel=*/false); - CGF.EmitStmt( - cast(S.getAssociatedStmt())->getCapturedStmt()); - }); + OMPCancelStackRAII CancelRegion(*this, OMPD_distribute_parallel_for, + /*HasCancel=*/false); + CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen, + /*HasCancel=*/false); } void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective( @@ -2081,14 +2276,6 @@ void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDirective( }); } -/// \brief Emit a helper variable and return corresponding lvalue. -static LValue EmitOMPHelperVar(CodeGenFunction &CGF, - const DeclRefExpr *Helper) { - auto VDecl = cast(Helper->getDecl()); - CGF.EmitVarDecl(*VDecl); - return CGF.EmitLValue(Helper); -} - namespace { struct ScheduleKindModifiersTy { OpenMPScheduleClauseKind Kind; @@ -2101,7 +2288,10 @@ namespace { }; } // namespace -bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) { +bool CodeGenFunction::EmitOMPWorksharingLoop( + const OMPLoopDirective &S, Expr *EUB, + const CodeGenLoopBoundsTy &CodeGenLoopBounds, + const CodeGenDispatchBoundsTy &CGDispatchBounds) { // Emit the loop iteration variable. auto IVExpr = cast(S.getIterationVariable()); auto IVDecl = cast(IVExpr->getDecl()); @@ -2151,10 +2341,10 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) { emitAlignedClause(*this, S); EmitOMPLinearClauseInit(S); // Emit helper vars inits. 
- LValue LB = - EmitOMPHelperVar(*this, cast(S.getLowerBoundVariable())); - LValue UB = - EmitOMPHelperVar(*this, cast(S.getUpperBoundVariable())); + + std::pair Bounds = CodeGenLoopBounds(*this, S); + LValue LB = Bounds.first; + LValue UB = Bounds.second; LValue ST = EmitOMPHelperVar(*this, cast(S.getStrideVariable())); LValue IL = @@ -2240,9 +2430,11 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) { ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic; // Emit the outer loop, which requests its work chunk [LB..UB] from // runtime and runs the inner loop to process it. + const OMPLoopArguments LoopArguments(LB.getAddress(), UB.getAddress(), + ST.getAddress(), IL.getAddress(), + Chunk, EUB); EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered, - LB.getAddress(), UB.getAddress(), ST.getAddress(), - IL.getAddress(), Chunk); + LoopArguments, CGDispatchBounds); } if (isOpenMPSimdDirective(S.getDirectiveKind())) { EmitOMPSimdFinal(S, @@ -2280,12 +2472,42 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) { return HasLastprivateClause; } +/// The following two functions generate expressions for the loop lower +/// and upper bounds in case of static and dynamic (dispatch) schedule +/// of the associated 'for' or 'distribute' loop. +static std::pair +emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) { + const OMPLoopDirective &LS = cast(S); + LValue LB = + EmitOMPHelperVar(CGF, cast(LS.getLowerBoundVariable())); + LValue UB = + EmitOMPHelperVar(CGF, cast(LS.getUpperBoundVariable())); + return {LB, UB}; +} + +/// When dealing with dispatch schedules (e.g. 
dynamic, guided) we do not +/// consider the lower and upper bound expressions generated by the +/// worksharing loop support, but we use 0 and the iteration space size as +/// constants +static std::pair +emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S, + Address LB, Address UB) { + const OMPLoopDirective &LS = cast(S); + const Expr *IVExpr = LS.getIterationVariable(); + const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType()); + llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0); + llvm::Value *UBVal = CGF.EmitScalarExpr(LS.getLastIteration()); + return {LBVal, UBVal}; +} + void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) { bool HasLastprivates = false; auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) { OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel()); - HasLastprivates = CGF.EmitOMPWorksharingLoop(S); + HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), + emitForLoopBounds, + emitDispatchForLoopBounds); }; { OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); @@ -2303,7 +2525,9 @@ void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) { bool HasLastprivates = false; auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) { - HasLastprivates = CGF.EmitOMPWorksharingLoop(S); + HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), + emitForLoopBounds, + emitDispatchForLoopBounds); }; { OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); @@ -2554,9 +2778,11 @@ void CodeGenFunction::EmitOMPParallelForDirective( // directives: 'parallel' with 'for' directive. 
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { OMPCancelStackRAII CancelRegion(CGF, OMPD_parallel_for, S.hasCancel()); - CGF.EmitOMPWorksharingLoop(S); + CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds, + emitDispatchForLoopBounds); }; - emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen); + emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen, + emitEmptyBoundParameters); } void CodeGenFunction::EmitOMPParallelForSimdDirective( @@ -2564,9 +2790,11 @@ void CodeGenFunction::EmitOMPParallelForSimdDirective( // Emit directive as a combined directive that consists of two implicit // directives: 'parallel' with 'for' directive. auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { - CGF.EmitOMPWorksharingLoop(S); + CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds, + emitDispatchForLoopBounds); }; - emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen); + emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen, + emitEmptyBoundParameters); } void CodeGenFunction::EmitOMPParallelSectionsDirective( @@ -2576,7 +2804,8 @@ void CodeGenFunction::EmitOMPParallelSectionsDirective( auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { CGF.EmitSections(S); }; - emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen); + emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen, + emitEmptyBoundParameters); } void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S, @@ -2794,7 +3023,9 @@ void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) { }(), S.getLocStart()); } -void CodeGenFunction::EmitOMPDistributeLoop(const OMPDistributeDirective &S) { +void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S, + const CodeGenLoopTy &CodeGenLoop, + Expr *IncExpr) { // Emit the loop iteration variable. 
auto IVExpr = cast(S.getIterationVariable()); auto IVDecl = cast(IVExpr->getDecl()); @@ -2835,10 +3066,17 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPDistributeDirective &S) { // Emit 'then' code. { // Emit helper vars inits. - LValue LB = - EmitOMPHelperVar(*this, cast(S.getLowerBoundVariable())); - LValue UB = - EmitOMPHelperVar(*this, cast(S.getUpperBoundVariable())); + + LValue LB = EmitOMPHelperVar( + *this, cast( + (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) + ? S.getCombinedLowerBoundVariable() + : S.getLowerBoundVariable()))); + LValue UB = EmitOMPHelperVar( + *this, cast( + (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) + ? S.getCombinedUpperBoundVariable() + : S.getUpperBoundVariable()))); LValue ST = EmitOMPHelperVar(*this, cast(S.getStrideVariable())); LValue IL = @@ -2890,15 +3128,25 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPDistributeDirective &S) { auto LoopExit = getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit")); // UB = min(UB, GlobalUB); - EmitIgnoredExpr(S.getEnsureUpperBound()); + EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) + ? S.getCombinedEnsureUpperBound() + : S.getEnsureUpperBound()); // IV = LB; - EmitIgnoredExpr(S.getInit()); + EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) + ? S.getCombinedInit() + : S.getInit()); + + Expr *Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) + ? S.getCombinedCond() + : S.getCond(); + + // for distribute alone, codegen // while (idx <= UB) { BODY; ++idx; } - EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), - S.getInc(), - [&S, LoopExit](CodeGenFunction &CGF) { - CGF.EmitOMPLoopBody(S, LoopExit); - CGF.EmitStopPoint(&S); + // when combined with 'for' (e.g. 
as in 'distribute parallel for') + // while (idx <= UB) { ; idx += ST; } + EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), Cond, IncExpr, + [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) { + CodeGenLoop(CGF, S, LoopExit); }, [](CodeGenFunction &) {}); EmitBlock(LoopExit.getBlock()); @@ -2907,9 +3155,11 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPDistributeDirective &S) { } else { // Emit the outer loop, which requests its work chunk [LB..UB] from // runtime and runs the inner loop to process it. - EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, - LB.getAddress(), UB.getAddress(), ST.getAddress(), - IL.getAddress(), Chunk); + const OMPLoopArguments LoopArguments = { + LB.getAddress(), UB.getAddress(), ST.getAddress(), IL.getAddress(), + Chunk}; + EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments, + CodeGenLoop); } // Emit final copy of the lastprivate variables if IsLastIter != 0. @@ -2931,7 +3181,8 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPDistributeDirective &S) { void CodeGenFunction::EmitOMPDistributeDirective( const OMPDistributeDirective &S) { auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { - CGF.EmitOMPDistributeLoop(S); + + CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); }; OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen, @@ -3840,7 +4091,8 @@ static void emitTargetParallelRegion(CodeGenFunction &CGF, CGF.EmitStmt(CS->getCapturedStmt()); CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); }; - emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen); + emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen, + emitEmptyBoundParameters); emitPostUpdateForReductionClause( CGF, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; }); } diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h index fa72019..1ded824 100644 --- 
a/lib/CodeGen/CodeGenFunction.h +++ b/lib/CodeGen/CodeGenFunction.h @@ -175,6 +175,25 @@ public: // because of jumps. VarBypassDetector Bypasses; + // CodeGen lambda for loops and support for ordered clause + typedef llvm::function_ref + CodeGenLoopTy; + typedef llvm::function_ref + CodeGenOrderedTy; + + // Codegen lambda for loop bounds in worksharing loop constructs + typedef llvm::function_ref( + CodeGenFunction &, const OMPExecutableDirective &S)> + CodeGenLoopBoundsTy; + + // Codegen lambda for loop bounds in dispatch-based loop implementation + typedef llvm::function_ref( + CodeGenFunction &, const OMPExecutableDirective &S, Address LB, + Address UB)> + CodeGenDispatchBoundsTy; + /// \brief CGBuilder insert helper. This function is called after an /// instruction is created using Builder. void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, @@ -2756,7 +2775,6 @@ public: void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S); void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S); void EmitOMPDistributeDirective(const OMPDistributeDirective &S); - void EmitOMPDistributeLoop(const OMPDistributeDirective &S); void EmitOMPDistributeParallelForDirective( const OMPDistributeParallelForDirective &S); void EmitOMPDistributeParallelForSimdDirective( @@ -2813,32 +2831,78 @@ public: void EmitOMPPrivateLoopCounters(const OMPLoopDirective &S, OMPPrivateScope &LoopScope); + /// Helper for the OpenMP loop directives. + void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit); + + /// \brief Emit code for the worksharing loop-based directive. + /// \return true, if this construct has any lastprivate clause, false - + /// otherwise. + bool EmitOMPWorksharingLoop(const OMPLoopDirective &S, Expr *EUB, + const CodeGenLoopBoundsTy &CodeGenLoopBounds, + const CodeGenDispatchBoundsTy &CGDispatchBounds); + private: /// Helpers for blocks llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info); /// Helpers for the OpenMP loop directives. 
- void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit); void EmitOMPSimdInit(const OMPLoopDirective &D, bool IsMonotonic = false); void EmitOMPSimdFinal( const OMPLoopDirective &D, const llvm::function_ref &CondGen); - /// \brief Emit code for the worksharing loop-based directive. - /// \return true, if this construct has any lastprivate clause, false - - /// otherwise. - bool EmitOMPWorksharingLoop(const OMPLoopDirective &S); - void EmitOMPOuterLoop(bool IsMonotonic, bool DynamicOrOrdered, - const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered, - Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk); + + void EmitOMPDistributeLoop(const OMPLoopDirective &S, + const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr); + + /// struct with the values to be passed to the OpenMP loop-related functions + struct OMPLoopArguments { + /// loop lower bound + Address LB = Address::invalid(); + /// loop upper bound + Address UB = Address::invalid(); + /// loop stride + Address ST = Address::invalid(); + /// isLastIteration argument for runtime functions + Address IL = Address::invalid(); + /// Chunk value generated by sema + llvm::Value *Chunk = nullptr; + /// EnsureUpperBound + Expr *EUB = nullptr; + /// IncrementExpression + Expr *IncExpr = nullptr; + /// Loop initialization + Expr *Init = nullptr; + /// Loop exit condition + Expr *Cond = nullptr; + /// Update of LB after a whole chunk has been executed + Expr *NextLB = nullptr; + /// Update of UB after a whole chunk has been executed + Expr *NextUB = nullptr; + OMPLoopArguments() = default; + OMPLoopArguments(Address LB, Address UB, Address ST, Address IL, + llvm::Value *Chunk = nullptr, Expr *EUB = nullptr, + Expr *IncExpr = nullptr, Expr *Init = nullptr, + Expr *Cond = nullptr, Expr *NextLB = nullptr, + Expr *NextUB = nullptr) + : LB(LB), UB(UB), ST(ST), IL(IL), Chunk(Chunk), EUB(EUB), + IncExpr(IncExpr), Init(Init), Cond(Cond), NextLB(NextLB), + NextUB(NextUB) {} + }; + void 
EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic, + const OMPLoopDirective &S, OMPPrivateScope &LoopScope, + const OMPLoopArguments &LoopArgs, + const CodeGenLoopTy &CodeGenLoop, + const CodeGenOrderedTy &CodeGenOrdered); void EmitOMPForOuterLoop(const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic, const OMPLoopDirective &S, - OMPPrivateScope &LoopScope, bool Ordered, Address LB, - Address UB, Address ST, Address IL, - llvm::Value *Chunk); - void EmitOMPDistributeOuterLoop( - OpenMPDistScheduleClauseKind ScheduleKind, - const OMPDistributeDirective &S, OMPPrivateScope &LoopScope, - Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk); + OMPPrivateScope &LoopScope, bool Ordered, + const OMPLoopArguments &LoopArgs, + const CodeGenDispatchBoundsTy &CGDispatchBounds); + void EmitOMPDistributeOuterLoop(OpenMPDistScheduleClauseKind ScheduleKind, + const OMPLoopDirective &S, + OMPPrivateScope &LoopScope, + const OMPLoopArguments &LoopArgs, + const CodeGenLoopTy &CodeGenLoopContent); /// \brief Emit code for sections directive. void EmitSections(const OMPExecutableDirective &S); diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp index 1920397..25d32f1 100644 --- a/lib/CodeGen/CodeGenModule.cpp +++ b/lib/CodeGen/CodeGenModule.cpp @@ -565,12 +565,8 @@ void CodeGenModule::DecorateInstructionWithTBAA(llvm::Instruction *Inst, void CodeGenModule::DecorateInstructionWithInvariantGroup( llvm::Instruction *I, const CXXRecordDecl *RD) { - llvm::Metadata *MD = CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0)); - auto *MetaDataNode = dyn_cast(MD); - // Check if we have to wrap MDString in MDNode. 
- if (!MetaDataNode) - MetaDataNode = llvm::MDNode::get(getLLVMContext(), MD); - I->setMetadata(llvm::LLVMContext::MD_invariant_group, MetaDataNode); + I->setMetadata(llvm::LLVMContext::MD_invariant_group, + llvm::MDNode::get(getLLVMContext(), {})); } void CodeGenModule::Error(SourceLocation loc, StringRef message) { diff --git a/lib/CodeGen/CodeGenPGO.cpp b/lib/CodeGen/CodeGenPGO.cpp index 6acedc0..9e19353 100644 --- a/lib/CodeGen/CodeGenPGO.cpp +++ b/lib/CodeGen/CodeGenPGO.cpp @@ -666,7 +666,7 @@ void CodeGenPGO::mapRegionCounters(const Decl *D) { } bool CodeGenPGO::skipRegionMappingForDecl(const Decl *D) { - if (SkipCoverageMapping) + if (!D->getBody()) return true; // Don't map the functions in system headers. diff --git a/lib/CodeGen/CodeGenPGO.h b/lib/CodeGen/CodeGenPGO.h index 0026df5..0759e65 100644 --- a/lib/CodeGen/CodeGenPGO.h +++ b/lib/CodeGen/CodeGenPGO.h @@ -40,14 +40,11 @@ private: std::unique_ptr ProfRecord; std::vector RegionCounts; uint64_t CurrentRegionCount; - /// \brief A flag that is set to true when this function doesn't need - /// to have coverage mapping data. - bool SkipCoverageMapping; public: CodeGenPGO(CodeGenModule &CGM) - : CGM(CGM), NumValueSites({{0}}), NumRegionCounters(0), - FunctionHash(0), CurrentRegionCount(0), SkipCoverageMapping(false) {} + : CGM(CGM), NumValueSites({{0}}), NumRegionCounters(0), FunctionHash(0), + CurrentRegionCount(0) {} /// Whether or not we have PGO region data for the current function. This is /// false both when we have no data at all and when our data has been diff --git a/lib/Driver/SanitizerArgs.cpp b/lib/Driver/SanitizerArgs.cpp index 8e61aad..c956136 100644 --- a/lib/Driver/SanitizerArgs.cpp +++ b/lib/Driver/SanitizerArgs.cpp @@ -265,6 +265,10 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC, Add &= ~InvalidTrappingKinds; Add &= Supported; + // Enable coverage if the fuzzing flag is set. 
+ if (Add & Fuzzer) + CoverageFeatures |= CoverageTracePCGuard | CoverageIndirCall | CoverageTraceCmp; + Kinds |= Add; } else if (Arg->getOption().matches(options::OPT_fno_sanitize_EQ)) { Arg->claim(); diff --git a/lib/Driver/ToolChains/Clang.cpp b/lib/Driver/ToolChains/Clang.cpp index 49708e7..6e1e4cc 100644 --- a/lib/Driver/ToolChains/Clang.cpp +++ b/lib/Driver/ToolChains/Clang.cpp @@ -2778,8 +2778,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back("-fno-split-dwarf-inlining"); if (DebugInfoKind == codegenoptions::NoDebugInfo) DebugInfoKind = codegenoptions::LimitedDebugInfo; - CmdArgs.push_back("-backend-option"); - CmdArgs.push_back("-split-dwarf=Enable"); + CmdArgs.push_back("-enable-split-dwarf"); } // After we've dealt with all combinations of things that could diff --git a/lib/Driver/ToolChains/CommonArgs.cpp b/lib/Driver/ToolChains/CommonArgs.cpp index 93b66eb..5e360f6 100644 --- a/lib/Driver/ToolChains/CommonArgs.cpp +++ b/lib/Driver/ToolChains/CommonArgs.cpp @@ -577,6 +577,17 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args, StaticRuntimes.push_back("esan"); } +static void addLibFuzzerRuntime(const ToolChain &TC, + const ArgList &Args, + ArgStringList &CmdArgs) { + StringRef ParentDir = llvm::sys::path::parent_path(TC.getDriver().InstalledDir); + SmallString<128> P(ParentDir); + llvm::sys::path::append(P, "lib", "libLLVMFuzzer.a"); + CmdArgs.push_back(Args.MakeArgString(P)); + TC.AddCXXStdlibLibArgs(Args, CmdArgs); +} + + // Should be called before we add system libraries (C++ ABI, libstdc++/libc++, // C runtime, etc). Returns true if sanitizer system deps need to be linked in. 
bool tools::addSanitizerRuntimes(const ToolChain &TC, const ArgList &Args, @@ -586,6 +597,11 @@ bool tools::addSanitizerRuntimes(const ToolChain &TC, const ArgList &Args, collectSanitizerRuntimes(TC, Args, SharedRuntimes, StaticRuntimes, NonWholeStaticRuntimes, HelperStaticRuntimes, RequiredSymbols); + // Inject libfuzzer dependencies. + if (TC.getSanitizerArgs().needsFuzzer()) { + addLibFuzzerRuntime(TC, Args, CmdArgs); + } + for (auto RT : SharedRuntimes) addSanitizerRuntime(TC, Args, CmdArgs, RT, true, false); for (auto RT : HelperStaticRuntimes) diff --git a/lib/Driver/ToolChains/Darwin.cpp b/lib/Driver/ToolChains/Darwin.cpp index 009a12d..e41b50c 100644 --- a/lib/Driver/ToolChains/Darwin.cpp +++ b/lib/Driver/ToolChains/Darwin.cpp @@ -930,6 +930,18 @@ void MachO::AddLinkRuntimeLib(const ArgList &Args, ArgStringList &CmdArgs, } } +void MachO::AddFuzzerLinkArgs(const ArgList &Args, ArgStringList &CmdArgs) const { + + // Go up one directory from Clang to find the libfuzzer archive file. + StringRef ParentDir = llvm::sys::path::parent_path(getDriver().InstalledDir); + SmallString<128> P(ParentDir); + llvm::sys::path::append(P, "lib", "libLLVMFuzzer.a"); + CmdArgs.push_back(Args.MakeArgString(P)); + + // Libfuzzer is written in C++ and requires libcxx. + AddCXXStdlibLibArgs(Args, CmdArgs); +} + StringRef Darwin::getPlatformFamily() const { switch (TargetPlatform) { case DarwinPlatformKind::MacOS: @@ -1035,10 +1047,14 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args, const SanitizerArgs &Sanitize = getSanitizerArgs(); if (Sanitize.needsAsanRt()) AddLinkSanitizerLibArgs(Args, CmdArgs, "asan"); + if (Sanitize.needsLsanRt()) + AddLinkSanitizerLibArgs(Args, CmdArgs, "lsan"); if (Sanitize.needsUbsanRt()) AddLinkSanitizerLibArgs(Args, CmdArgs, "ubsan"); if (Sanitize.needsTsanRt()) AddLinkSanitizerLibArgs(Args, CmdArgs, "tsan"); + if (Sanitize.needsFuzzer()) + AddFuzzerLinkArgs(Args, CmdArgs); if (Sanitize.needsStatsRt()) { StringRef OS = isTargetMacOS() ? 
"osx" : "iossim"; AddLinkRuntimeLib(Args, CmdArgs, @@ -1892,6 +1908,8 @@ SanitizerMask Darwin::getSupportedSanitizers() const { const bool IsX86_64 = getTriple().getArch() == llvm::Triple::x86_64; SanitizerMask Res = ToolChain::getSupportedSanitizers(); Res |= SanitizerKind::Address; + Res |= SanitizerKind::Leak; + Res |= SanitizerKind::Fuzzer; if (isTargetMacOS()) { if (!isMacosxVersionLT(10, 9)) Res |= SanitizerKind::Vptr; diff --git a/lib/Driver/ToolChains/Darwin.h b/lib/Driver/ToolChains/Darwin.h index 984f8ef..16ed042 100644 --- a/lib/Driver/ToolChains/Darwin.h +++ b/lib/Driver/ToolChains/Darwin.h @@ -154,6 +154,8 @@ public: /// Add the linker arguments to link the compiler runtime library. virtual void AddLinkRuntimeLibArgs(const llvm::opt::ArgList &Args, llvm::opt::ArgStringList &CmdArgs) const; + virtual void AddFuzzerLinkArgs(const llvm::opt::ArgList &Args, + llvm::opt::ArgStringList &CmdArgs) const; virtual void addStartObjectFileArgs(const llvm::opt::ArgList &Args, llvm::opt::ArgStringList &CmdArgs) const { diff --git a/lib/Driver/ToolChains/Gnu.cpp b/lib/Driver/ToolChains/Gnu.cpp index 313bf38..d29d826 100644 --- a/lib/Driver/ToolChains/Gnu.cpp +++ b/lib/Driver/ToolChains/Gnu.cpp @@ -1747,7 +1747,9 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const { static const char *const ARMTriples[] = {"arm-linux-gnueabi", "arm-linux-androideabi"}; static const char *const ARMHFTriples[] = {"arm-linux-gnueabihf", - "armv7hl-redhat-linux-gnueabi"}; + "armv7hl-redhat-linux-gnueabi", + "armv6hl-suse-linux-gnueabi", + "armv7hl-suse-linux-gnueabi"}; static const char *const ARMebLibDirs[] = {"/lib"}; static const char *const ARMebTriples[] = {"armeb-linux-gnueabi", "armeb-linux-androideabi"}; diff --git a/lib/Driver/ToolChains/Hexagon.cpp b/lib/Driver/ToolChains/Hexagon.cpp index c143b7f..1d7bcf8 100644 --- a/lib/Driver/ToolChains/Hexagon.cpp +++ b/lib/Driver/ToolChains/Hexagon.cpp @@ -402,6 +402,40 @@ Tool *HexagonToolChain::buildLinker() 
const { return new tools::hexagon::Linker(*this); } +unsigned HexagonToolChain::getOptimizationLevel( + const llvm::opt::ArgList &DriverArgs) const { + // Copied in large part from lib/Frontend/CompilerInvocation.cpp. + Arg *A = DriverArgs.getLastArg(options::OPT_O_Group); + if (!A) + return 0; + + if (A->getOption().matches(options::OPT_O0)) + return 0; + if (A->getOption().matches(options::OPT_Ofast) || + A->getOption().matches(options::OPT_O4)) + return 3; + assert(A->getNumValues() != 0); + StringRef S(A->getValue()); + if (S == "s" || S == "z" || S.empty()) + return 2; + if (S == "g") + return 1; + + unsigned OptLevel; + if (S.getAsInteger(10, OptLevel)) + return 0; + return OptLevel; +} + +void HexagonToolChain::addClangTargetOptions(const ArgList &DriverArgs, + ArgStringList &CC1Args) const { + if (DriverArgs.hasArg(options::OPT_ffp_contract)) + return; + unsigned OptLevel = getOptimizationLevel(DriverArgs); + if (OptLevel >= 3) + CC1Args.push_back("-ffp-contract=fast"); +} + void HexagonToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs, ArgStringList &CC1Args) const { if (DriverArgs.hasArg(options::OPT_nostdinc) || diff --git a/lib/Driver/ToolChains/Hexagon.h b/lib/Driver/ToolChains/Hexagon.h index fb50ba3..78f97a3 100644 --- a/lib/Driver/ToolChains/Hexagon.h +++ b/lib/Driver/ToolChains/Hexagon.h @@ -61,11 +61,15 @@ protected: Tool *buildAssembler() const override; Tool *buildLinker() const override; + unsigned getOptimizationLevel(const llvm::opt::ArgList &DriverArgs) const; + public: HexagonToolChain(const Driver &D, const llvm::Triple &Triple, const llvm::opt::ArgList &Args); ~HexagonToolChain() override; + void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs, + llvm::opt::ArgStringList &CC1Args) const override; void AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args) const override; diff --git a/lib/Driver/ToolChains/Linux.cpp b/lib/Driver/ToolChains/Linux.cpp index 3ffb2f6..50443a1 
100644 --- a/lib/Driver/ToolChains/Linux.cpp +++ b/lib/Driver/ToolChains/Linux.cpp @@ -869,6 +869,7 @@ SanitizerMask Linux::getSupportedSanitizers() const { llvm::Triple::thumbeb; SanitizerMask Res = ToolChain::getSupportedSanitizers(); Res |= SanitizerKind::Address; + Res |= SanitizerKind::Fuzzer; Res |= SanitizerKind::KernelAddress; Res |= SanitizerKind::Vptr; Res |= SanitizerKind::SafeStack; diff --git a/lib/Format/ContinuationIndenter.cpp b/lib/Format/ContinuationIndenter.cpp index 73ae10a..3adb72c 100644 --- a/lib/Format/ContinuationIndenter.cpp +++ b/lib/Format/ContinuationIndenter.cpp @@ -792,7 +792,8 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State, if (Previous && Previous->is(tok::question)) State.Stack.back().QuestionColumn = State.Column; } - if (!Current.opensScope() && !Current.closesScope()) + if (!Current.opensScope() && !Current.closesScope() && + !Current.is(TT_PointerOrReference)) State.LowestLevelOnLine = std::min(State.LowestLevelOnLine, Current.NestingLevel); if (Current.isMemberAccess()) diff --git a/lib/Format/Format.cpp b/lib/Format/Format.cpp index 0e2da71..f55a623 100644 --- a/lib/Format/Format.cpp +++ b/lib/Format/Format.cpp @@ -908,8 +908,8 @@ private: class Formatter : public TokenAnalyzer { public: Formatter(const Environment &Env, const FormatStyle &Style, - bool *IncompleteFormat) - : TokenAnalyzer(Env, Style), IncompleteFormat(IncompleteFormat) {} + FormattingAttemptStatus *Status) + : TokenAnalyzer(Env, Style), Status(Status) {} tooling::Replacements analyze(TokenAnnotator &Annotator, @@ -931,7 +931,7 @@ public: Env.getSourceManager(), Whitespaces, Encoding, BinPackInconclusiveFunctions); UnwrappedLineFormatter(&Indenter, &Whitespaces, Style, Tokens.getKeywords(), - IncompleteFormat) + Env.getSourceManager(), Status) .format(AnnotatedLines); for (const auto &R : Whitespaces.generateReplacements()) if (Result.add(R)) @@ -1013,7 +1013,7 @@ private: } bool BinPackInconclusiveFunctions; - bool *IncompleteFormat; 
+ FormattingAttemptStatus *Status; }; // This class clean up the erroneous/redundant code around the given ranges in @@ -1830,7 +1830,8 @@ cleanupAroundReplacements(StringRef Code, const tooling::Replacements &Replaces, tooling::Replacements reformat(const FormatStyle &Style, StringRef Code, ArrayRef Ranges, - StringRef FileName, bool *IncompleteFormat) { + StringRef FileName, + FormattingAttemptStatus *Status) { FormatStyle Expanded = expandPresets(Style); if (Expanded.DisableFormat) return tooling::Replacements(); @@ -1846,11 +1847,11 @@ tooling::Replacements reformat(const FormatStyle &Style, StringRef Code, auto NewEnv = Environment::CreateVirtualEnvironment( *NewCode, FileName, tooling::calculateRangesAfterReplacements(Fixes, Ranges)); - Formatter Format(*NewEnv, Expanded, IncompleteFormat); + Formatter Format(*NewEnv, Expanded, Status); return Fixes.merge(Format.process()); } } - Formatter Format(*Env, Expanded, IncompleteFormat); + Formatter Format(*Env, Expanded, Status); return Format.process(); }; @@ -1866,7 +1867,7 @@ tooling::Replacements reformat(const FormatStyle &Style, StringRef Code, return reformatAfterApplying(Requoter); } - Formatter Format(*Env, Expanded, IncompleteFormat); + Formatter Format(*Env, Expanded, Status); return Format.process(); } @@ -1879,6 +1880,16 @@ tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code, return Clean.process(); } +tooling::Replacements reformat(const FormatStyle &Style, StringRef Code, + ArrayRef Ranges, + StringRef FileName, bool *IncompleteFormat) { + FormattingAttemptStatus Status; + auto Result = reformat(Style, Code, Ranges, FileName, &Status); + if (!Status.FormatComplete) + *IncompleteFormat = true; + return Result; +} + tooling::Replacements fixNamespaceEndComments(const FormatStyle &Style, StringRef Code, ArrayRef Ranges, diff --git a/lib/Format/FormatToken.h b/lib/Format/FormatToken.h index c964912..3b3600f 100644 --- a/lib/Format/FormatToken.h +++ b/lib/Format/FormatToken.h @@ -617,10 
+617,12 @@ struct AdditionalKeywords { kw_finally = &IdentTable.get("finally"); kw_from = &IdentTable.get("from"); kw_function = &IdentTable.get("function"); + kw_get = &IdentTable.get("get"); kw_import = &IdentTable.get("import"); kw_is = &IdentTable.get("is"); kw_let = &IdentTable.get("let"); kw_module = &IdentTable.get("module"); + kw_set = &IdentTable.get("set"); kw_type = &IdentTable.get("type"); kw_var = &IdentTable.get("var"); kw_yield = &IdentTable.get("yield"); @@ -675,10 +677,12 @@ struct AdditionalKeywords { IdentifierInfo *kw_finally; IdentifierInfo *kw_from; IdentifierInfo *kw_function; + IdentifierInfo *kw_get; IdentifierInfo *kw_import; IdentifierInfo *kw_is; IdentifierInfo *kw_let; IdentifierInfo *kw_module; + IdentifierInfo *kw_set; IdentifierInfo *kw_type; IdentifierInfo *kw_var; IdentifierInfo *kw_yield; diff --git a/lib/Format/TokenAnnotator.cpp b/lib/Format/TokenAnnotator.cpp index bbc2d1e..c274d7b 100644 --- a/lib/Format/TokenAnnotator.cpp +++ b/lib/Format/TokenAnnotator.cpp @@ -1120,7 +1120,11 @@ private: Current.Type = TT_FunctionAnnotationRParen; } } - } else if (Current.is(tok::at) && Current.Next) { + } else if (Current.is(tok::at) && Current.Next && + Style.Language != FormatStyle::LK_JavaScript && + Style.Language != FormatStyle::LK_Java) { + // In Java & JavaScript, "@..." is a decorator or annotation. In ObjC, it + // marks declarations and properties that need special formatting. 
switch (Current.Next->Tok.getObjCKeywordID()) { case tok::objc_interface: case tok::objc_implementation: @@ -2541,9 +2545,11 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line, } else if (Style.Language == FormatStyle::LK_JavaScript) { const FormatToken *NonComment = Right.getPreviousNonComment(); if (NonComment && - NonComment->isOneOf(tok::kw_return, tok::kw_continue, tok::kw_break, - tok::kw_throw, Keywords.kw_interface, - Keywords.kw_type)) + NonComment->isOneOf( + tok::kw_return, tok::kw_continue, tok::kw_break, tok::kw_throw, + Keywords.kw_interface, Keywords.kw_type, tok::kw_static, + tok::kw_public, tok::kw_private, tok::kw_protected, + Keywords.kw_abstract, Keywords.kw_get, Keywords.kw_set)) return false; // Otherwise a semicolon is inserted. if (Left.is(TT_JsFatArrow) && Right.is(tok::l_brace)) return false; diff --git a/lib/Format/UnwrappedLineFormatter.cpp b/lib/Format/UnwrappedLineFormatter.cpp index c3c154a..8ff8934 100644 --- a/lib/Format/UnwrappedLineFormatter.cpp +++ b/lib/Format/UnwrappedLineFormatter.cpp @@ -835,8 +835,11 @@ UnwrappedLineFormatter::format(const SmallVectorImpl &Lines, bool ShouldFormat = TheLine.Affected || FixIndentation; // We cannot format this line; if the reason is that the line had a // parsing error, remember that. 
- if (ShouldFormat && TheLine.Type == LT_Invalid && IncompleteFormat) - *IncompleteFormat = true; + if (ShouldFormat && TheLine.Type == LT_Invalid && Status) { + Status->FormatComplete = false; + Status->Line = + SourceMgr.getSpellingLineNumber(TheLine.First->Tok.getLocation()); + } if (ShouldFormat && TheLine.Type != LT_Invalid) { if (!DryRun) diff --git a/lib/Format/UnwrappedLineFormatter.h b/lib/Format/UnwrappedLineFormatter.h index 93247f7..55f0d1c 100644 --- a/lib/Format/UnwrappedLineFormatter.h +++ b/lib/Format/UnwrappedLineFormatter.h @@ -32,9 +32,11 @@ public: WhitespaceManager *Whitespaces, const FormatStyle &Style, const AdditionalKeywords &Keywords, - bool *IncompleteFormat) + const SourceManager &SourceMgr, + FormattingAttemptStatus *Status) : Indenter(Indenter), Whitespaces(Whitespaces), Style(Style), - Keywords(Keywords), IncompleteFormat(IncompleteFormat) {} + Keywords(Keywords), SourceMgr(SourceMgr), + Status(Status) {} /// \brief Format the current block and return the penalty. 
unsigned format(const SmallVectorImpl &Lines, @@ -63,7 +65,8 @@ private: WhitespaceManager *Whitespaces; const FormatStyle &Style; const AdditionalKeywords &Keywords; - bool *IncompleteFormat; + const SourceManager &SourceMgr; + FormattingAttemptStatus *Status; }; } // end namespace format } // end namespace clang diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp index 0e0eb40..8cdb829 100644 --- a/lib/Frontend/CompilerInvocation.cpp +++ b/lib/Frontend/CompilerInvocation.cpp @@ -519,6 +519,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK, Opts.MacroDebugInfo = Args.hasArg(OPT_debug_info_macro); Opts.WholeProgramVTables = Args.hasArg(OPT_fwhole_program_vtables); Opts.LTOVisibilityPublicStd = Args.hasArg(OPT_flto_visibility_public_std); + Opts.EnableSplitDwarf = Args.hasArg(OPT_enable_split_dwarf); Opts.SplitDwarfFile = Args.getLastArgValue(OPT_split_dwarf_file); Opts.SplitDwarfInlining = !Args.hasArg(OPT_fno_split_dwarf_inlining); Opts.DebugTypeExtRefs = Args.hasArg(OPT_dwarf_ext_refs); diff --git a/lib/Frontend/InitPreprocessor.cpp b/lib/Frontend/InitPreprocessor.cpp index 88be773..0dd04e8 100644 --- a/lib/Frontend/InitPreprocessor.cpp +++ b/lib/Frontend/InitPreprocessor.cpp @@ -882,14 +882,16 @@ static void InitializePredefinedMacros(const TargetInfo &TI, // The value written by __atomic_test_and_set. // FIXME: This is target-dependent. Builder.defineMacro("__GCC_ATOMIC_TEST_AND_SET_TRUEVAL", "1"); + } + auto addLockFreeMacros = [&](const llvm::Twine &Prefix) { // Used by libc++ and libstdc++ to implement ATOMIC__LOCK_FREE. 
unsigned InlineWidthBits = TI.getMaxAtomicInlineWidth(); -#define DEFINE_LOCK_FREE_MACRO(TYPE, Type) \ - Builder.defineMacro("__GCC_ATOMIC_" #TYPE "_LOCK_FREE", \ - getLockFreeValue(TI.get##Type##Width(), \ - TI.get##Type##Align(), \ - InlineWidthBits)); +#define DEFINE_LOCK_FREE_MACRO(TYPE, Type) \ + Builder.defineMacro(Prefix + #TYPE "_LOCK_FREE", \ + getLockFreeValue(TI.get##Type##Width(), \ + TI.get##Type##Align(), \ + InlineWidthBits)); DEFINE_LOCK_FREE_MACRO(BOOL, Bool); DEFINE_LOCK_FREE_MACRO(CHAR, Char); DEFINE_LOCK_FREE_MACRO(CHAR16_T, Char16); @@ -899,12 +901,15 @@ static void InitializePredefinedMacros(const TargetInfo &TI, DEFINE_LOCK_FREE_MACRO(INT, Int); DEFINE_LOCK_FREE_MACRO(LONG, Long); DEFINE_LOCK_FREE_MACRO(LLONG, LongLong); - Builder.defineMacro("__GCC_ATOMIC_POINTER_LOCK_FREE", + Builder.defineMacro(Prefix + "POINTER_LOCK_FREE", getLockFreeValue(TI.getPointerWidth(0), TI.getPointerAlign(0), InlineWidthBits)); #undef DEFINE_LOCK_FREE_MACRO - } + }; + addLockFreeMacros("__CLANG_ATOMIC_"); + if (!LangOpts.MSVCCompat) + addLockFreeMacros("__GCC_ATOMIC_"); if (LangOpts.NoInlineDefine) Builder.defineMacro("__NO_INLINE__"); diff --git a/lib/Headers/stdatomic.h b/lib/Headers/stdatomic.h index 23bb3a3..b4845a7 100644 --- a/lib/Headers/stdatomic.h +++ b/lib/Headers/stdatomic.h @@ -40,16 +40,16 @@ extern "C" { /* 7.17.1 Introduction */ -#define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE -#define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE -#define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE -#define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE -#define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE -#define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE -#define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE -#define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE -#define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE -#define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE +#define 
ATOMIC_BOOL_LOCK_FREE __CLANG_ATOMIC_BOOL_LOCK_FREE +#define ATOMIC_CHAR_LOCK_FREE __CLANG_ATOMIC_CHAR_LOCK_FREE +#define ATOMIC_CHAR16_T_LOCK_FREE __CLANG_ATOMIC_CHAR16_T_LOCK_FREE +#define ATOMIC_CHAR32_T_LOCK_FREE __CLANG_ATOMIC_CHAR32_T_LOCK_FREE +#define ATOMIC_WCHAR_T_LOCK_FREE __CLANG_ATOMIC_WCHAR_T_LOCK_FREE +#define ATOMIC_SHORT_LOCK_FREE __CLANG_ATOMIC_SHORT_LOCK_FREE +#define ATOMIC_INT_LOCK_FREE __CLANG_ATOMIC_INT_LOCK_FREE +#define ATOMIC_LONG_LOCK_FREE __CLANG_ATOMIC_LONG_LOCK_FREE +#define ATOMIC_LLONG_LOCK_FREE __CLANG_ATOMIC_LLONG_LOCK_FREE +#define ATOMIC_POINTER_LOCK_FREE __CLANG_ATOMIC_POINTER_LOCK_FREE /* 7.17.2 Initialization */ diff --git a/lib/Index/IndexDecl.cpp b/lib/Index/IndexDecl.cpp index 0e89350..c1eed16 100644 --- a/lib/Index/IndexDecl.cpp +++ b/lib/Index/IndexDecl.cpp @@ -14,6 +14,13 @@ using namespace clang; using namespace index; +#define TRY_DECL(D,CALL_EXPR) \ + do { \ + if (!IndexCtx.shouldIndex(D)) return true; \ + if (!CALL_EXPR) \ + return false; \ + } while (0) + #define TRY_TO(CALL_EXPR) \ do { \ if (!CALL_EXPR) \ @@ -120,8 +127,7 @@ public: D->getDeclContext(), 0); } - if (!IndexCtx.handleDecl(D, MethodLoc, Roles, Relations)) - return false; + TRY_DECL(D, IndexCtx.handleDecl(D, MethodLoc, Roles, Relations)); IndexCtx.indexTypeSourceInfo(D->getReturnTypeSourceInfo(), D); bool hasIBActionAndFirst = D->hasAttr(); for (const auto *I : D->parameters()) { @@ -138,6 +144,53 @@ public: return true; } + /// Gather the declarations which the given declaration \D overrides in a + /// pseudo-override manner. + /// + /// Pseudo-overrides occur when a class template specialization declares + /// a declaration that has the same name as a similar declaration in the + /// non-specialized template. 
+ void + gatherTemplatePseudoOverrides(const NamedDecl *D, + SmallVectorImpl &Relations) { + if (!IndexCtx.getLangOpts().CPlusPlus) + return; + const auto *CTSD = + dyn_cast(D->getLexicalDeclContext()); + if (!CTSD) + return; + llvm::PointerUnion + Template = CTSD->getSpecializedTemplateOrPartial(); + if (const auto *CTD = Template.dyn_cast()) { + const CXXRecordDecl *Pattern = CTD->getTemplatedDecl(); + bool TypeOverride = isa(D); + for (const NamedDecl *ND : Pattern->lookup(D->getDeclName())) { + if (const auto *CTD = dyn_cast(ND)) + ND = CTD->getTemplatedDecl(); + if (ND->isImplicit()) + continue; + // Types can override other types. + if (!TypeOverride) { + if (ND->getKind() != D->getKind()) + continue; + } else if (!isa(ND)) + continue; + if (const auto *FD = dyn_cast(ND)) { + const auto *DFD = cast(D); + // Function overrides are approximated using the number of parameters. + if (FD->getStorageClass() != DFD->getStorageClass() || + FD->getNumParams() != DFD->getNumParams()) + continue; + } + Relations.emplace_back( + SymbolRoleSet(SymbolRole::RelationOverrideOf) | + SymbolRoleSet(SymbolRole::RelationSpecializationOf), + ND); + } + } + } + bool VisitFunctionDecl(const FunctionDecl *D) { if (D->isDeleted()) return true; @@ -152,9 +205,13 @@ public: Relations.emplace_back((unsigned)SymbolRole::RelationOverrideOf, *I); } } + gatherTemplatePseudoOverrides(D, Relations); + if (const auto *Base = D->getPrimaryTemplate()) + Relations.push_back( + SymbolRelation(SymbolRoleSet(SymbolRole::RelationSpecializationOf), + Base->getTemplatedDecl())); - if (!IndexCtx.handleDecl(D, Roles, Relations)) - return false; + TRY_DECL(D, IndexCtx.handleDecl(D, Roles, Relations)); handleDeclarator(D); if (const CXXConstructorDecl *Ctor = dyn_cast(D)) { @@ -189,16 +246,18 @@ public: } bool VisitVarDecl(const VarDecl *D) { - if (!IndexCtx.handleDecl(D)) - return false; + SmallVector Relations; + gatherTemplatePseudoOverrides(D, Relations); + TRY_DECL(D, IndexCtx.handleDecl(D, 
SymbolRoleSet(), Relations)); handleDeclarator(D); IndexCtx.indexBody(D->getInit(), D); return true; } bool VisitFieldDecl(const FieldDecl *D) { - if (!IndexCtx.handleDecl(D)) - return false; + SmallVector Relations; + gatherTemplatePseudoOverrides(D, Relations); + TRY_DECL(D, IndexCtx.handleDecl(D, SymbolRoleSet(), Relations)); handleDeclarator(D); if (D->isBitField()) IndexCtx.indexBody(D->getBitWidth(), D); @@ -212,8 +271,7 @@ public: // handled in VisitObjCPropertyImplDecl return true; } - if (!IndexCtx.handleDecl(D)) - return false; + TRY_DECL(D, IndexCtx.handleDecl(D)); handleDeclarator(D); return true; } @@ -224,17 +282,18 @@ public: } bool VisitEnumConstantDecl(const EnumConstantDecl *D) { - if (!IndexCtx.handleDecl(D)) - return false; + TRY_DECL(D, IndexCtx.handleDecl(D)); IndexCtx.indexBody(D->getInitExpr(), D); return true; } bool VisitTypedefNameDecl(const TypedefNameDecl *D) { - if (!D->isTransparentTag()) - if (!IndexCtx.handleDecl(D)) - return false; - IndexCtx.indexTypeSourceInfo(D->getTypeSourceInfo(), D); + if (!D->isTransparentTag()) { + SmallVector Relations; + gatherTemplatePseudoOverrides(D, Relations); + TRY_DECL(D, IndexCtx.handleDecl(D, SymbolRoleSet(), Relations)); + IndexCtx.indexTypeSourceInfo(D->getTypeSourceInfo(), D); + } return true; } @@ -242,7 +301,9 @@ public: // Non-free standing tags are handled in indexTypeSourceInfo. 
if (D->isFreeStanding()) { if (D->isThisDeclarationADefinition()) { - IndexCtx.indexTagDecl(D); + SmallVector Relations; + gatherTemplatePseudoOverrides(D, Relations); + IndexCtx.indexTagDecl(D, Relations); } else { auto *Parent = dyn_cast(D->getDeclContext()); return IndexCtx.handleReference(D, D->getLocation(), Parent, @@ -272,7 +333,7 @@ public: bool VisitObjCInterfaceDecl(const ObjCInterfaceDecl *D) { if (D->isThisDeclarationADefinition()) { - TRY_TO(IndexCtx.handleDecl(D)); + TRY_DECL(D, IndexCtx.handleDecl(D)); SourceLocation SuperLoc = D->getSuperClassLoc(); if (auto *SuperD = D->getSuperClass()) { bool hasSuperTypedef = false; @@ -303,7 +364,7 @@ public: bool VisitObjCProtocolDecl(const ObjCProtocolDecl *D) { if (D->isThisDeclarationADefinition()) { - TRY_TO(IndexCtx.handleDecl(D)); + TRY_DECL(D, IndexCtx.handleDecl(D)); TRY_TO(handleReferencedProtocols(D->getReferencedProtocols(), D, /*superLoc=*/SourceLocation())); TRY_TO(IndexCtx.indexDeclContext(D)); @@ -322,8 +383,7 @@ public: if (Class->isImplicitInterfaceDecl()) IndexCtx.handleDecl(Class); - if (!IndexCtx.handleDecl(D)) - return false; + TRY_DECL(D, IndexCtx.handleDecl(D)); // Visit implicit @synthesize property implementations first as their // location is reported at the name of the @implementation block. 
This @@ -342,6 +402,8 @@ public: } bool VisitObjCCategoryDecl(const ObjCCategoryDecl *D) { + if (!IndexCtx.shouldIndex(D)) + return true; const ObjCInterfaceDecl *C = D->getClassInterface(); if (!C) return true; @@ -370,8 +432,7 @@ public: SourceLocation CategoryLoc = D->getCategoryNameLoc(); if (!CategoryLoc.isValid()) CategoryLoc = D->getLocation(); - if (!IndexCtx.handleDecl(D, CategoryLoc)) - return false; + TRY_DECL(D, IndexCtx.handleDecl(D, CategoryLoc)); IndexCtx.indexDeclContext(D); return true; } @@ -393,8 +454,7 @@ public: if (ObjCMethodDecl *MD = D->getSetterMethodDecl()) if (MD->getLexicalDeclContext() == D->getLexicalDeclContext()) handleObjCMethod(MD, D); - if (!IndexCtx.handleDecl(D)) - return false; + TRY_DECL(D, IndexCtx.handleDecl(D)); if (IBOutletCollectionAttr *attr = D->getAttr()) IndexCtx.indexTypeSourceInfo(attr->getInterfaceLoc(), D, D->getLexicalDeclContext(), false, true); @@ -415,8 +475,7 @@ public: Loc = Container->getLocation(); Roles |= (SymbolRoleSet)SymbolRole::Implicit; } - if (!IndexCtx.handleDecl(D, Loc, Roles, Relations)) - return false; + TRY_DECL(D, IndexCtx.handleDecl(D, Loc, Roles, Relations)); if (D->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) return true; @@ -450,8 +509,7 @@ public: } else if (D->getLocation() == IvarLoc) { IvarRoles = (SymbolRoleSet)SymbolRole::Implicit; } - if(!IndexCtx.handleDecl(IvarD, IvarLoc, IvarRoles)) - return false; + TRY_DECL(IvarD, IndexCtx.handleDecl(IvarD, IvarLoc, IvarRoles)); } else { IndexCtx.handleReference(IvarD, D->getPropertyIvarDeclLoc(), nullptr, D->getDeclContext(), SymbolRoleSet()); @@ -461,8 +519,7 @@ public: } bool VisitNamespaceDecl(const NamespaceDecl *D) { - if (!IndexCtx.handleDecl(D)) - return false; + TRY_DECL(D, IndexCtx.handleDecl(D)); IndexCtx.indexDeclContext(D); return true; } @@ -507,6 +564,9 @@ public: D, SymbolRelation(SymbolRoleSet(SymbolRole::RelationSpecializationOf), SpecializationOf)); } + if (TypeSourceInfo *TSI = D->getTypeAsWritten()) + 
IndexCtx.indexTypeSourceInfo(TSI, /*Parent=*/nullptr, + D->getLexicalDeclContext()); return true; } diff --git a/lib/Index/IndexSymbol.cpp b/lib/Index/IndexSymbol.cpp index ea66b70..0bfa193 100644 --- a/lib/Index/IndexSymbol.cpp +++ b/lib/Index/IndexSymbol.cpp @@ -318,6 +318,20 @@ SymbolInfo index::getSymbolInfo(const Decl *D) { if (Info.Properties & (unsigned)SymbolProperty::Generic) Info.Lang = SymbolLanguage::CXX; + auto getExternalSymAttr = [](const Decl *D) -> ExternalSourceSymbolAttr* { + if (auto *attr = D->getAttr()) + return attr; + if (auto *dcd = dyn_cast(D->getDeclContext())) { + if (auto *attr = dcd->getAttr()) + return attr; + } + return nullptr; + }; + if (auto *attr = getExternalSymAttr(D)) { + if (attr->getLanguage() == "Swift") + Info.Lang = SymbolLanguage::Swift; + } + return Info; } @@ -458,6 +472,7 @@ StringRef index::getSymbolLanguageString(SymbolLanguage K) { case SymbolLanguage::C: return "C"; case SymbolLanguage::ObjC: return "ObjC"; case SymbolLanguage::CXX: return "C++"; + case SymbolLanguage::Swift: return "Swift"; } llvm_unreachable("invalid symbol language kind"); } diff --git a/lib/Index/IndexTypeSourceInfo.cpp b/lib/Index/IndexTypeSourceInfo.cpp index a3566a9..44d1241 100644 --- a/lib/Index/IndexTypeSourceInfo.cpp +++ b/lib/Index/IndexTypeSourceInfo.cpp @@ -210,6 +210,8 @@ void IndexingContext::indexNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, void IndexingContext::indexTagDecl(const TagDecl *D, ArrayRef Relations) { + if (!shouldIndex(D)) + return; if (!shouldIndexFunctionLocalSymbols() && isFunctionLocalSymbol(D)) return; diff --git a/lib/Index/IndexingContext.cpp b/lib/Index/IndexingContext.cpp index 85574d0..709a236 100644 --- a/lib/Index/IndexingContext.cpp +++ b/lib/Index/IndexingContext.cpp @@ -17,6 +17,21 @@ using namespace clang; using namespace index; +static bool isGeneratedDecl(const Decl *D) { + if (auto *attr = D->getAttr()) { + return attr->getGeneratedDeclaration(); + } + return false; +} + +bool 
IndexingContext::shouldIndex(const Decl *D) { + return !isGeneratedDecl(D); +} + +const LangOptions &IndexingContext::getLangOpts() const { + return Ctx->getLangOpts(); +} + bool IndexingContext::shouldIndexFunctionLocalSymbols() const { return IndexOpts.IndexFunctionLocals; } diff --git a/lib/Index/IndexingContext.h b/lib/Index/IndexingContext.h index 1ebf6f9..566651c 100644 --- a/lib/Index/IndexingContext.h +++ b/lib/Index/IndexingContext.h @@ -48,6 +48,10 @@ public: void setASTContext(ASTContext &ctx) { Ctx = &ctx; } + bool shouldIndex(const Decl *D); + + const LangOptions &getLangOpts() const; + bool shouldSuppressRefs() const { return false; } diff --git a/lib/Index/USRGeneration.cpp b/lib/Index/USRGeneration.cpp index 73dddd9..ed469f6 100644 --- a/lib/Index/USRGeneration.cpp +++ b/lib/Index/USRGeneration.cpp @@ -46,6 +46,15 @@ static bool printLoc(llvm::raw_ostream &OS, SourceLocation Loc, return false; } +static StringRef GetExternalSourceContainer(const NamedDecl *D) { + if (!D) + return StringRef(); + if (auto *attr = D->getAttr()) { + return attr->getDefinedIn(); + } + return StringRef(); +} + namespace { class USRGenerator : public ConstDeclVisitor { SmallVectorImpl &Buf; @@ -79,7 +88,8 @@ public: void VisitNamespaceAliasDecl(const NamespaceAliasDecl *D); void VisitFunctionTemplateDecl(const FunctionTemplateDecl *D); void VisitClassTemplateDecl(const ClassTemplateDecl *D); - void VisitObjCContainerDecl(const ObjCContainerDecl *CD); + void VisitObjCContainerDecl(const ObjCContainerDecl *CD, + const ObjCCategoryDecl *CatD = nullptr); void VisitObjCMethodDecl(const ObjCMethodDecl *MD); void VisitObjCPropertyDecl(const ObjCPropertyDecl *D); void VisitObjCPropertyImplDecl(const ObjCPropertyImplDecl *D); @@ -116,6 +126,8 @@ public: return D->getParentFunctionOrMethod() != nullptr; } + void GenExtSymbolContainer(const NamedDecl *D); + /// Generate the string component containing the location of the /// declaration. 
bool GenLoc(const Decl *D, bool IncludeOffset); @@ -127,13 +139,16 @@ public: /// itself. /// Generate a USR for an Objective-C class. - void GenObjCClass(StringRef cls) { - generateUSRForObjCClass(cls, Out); + void GenObjCClass(StringRef cls, StringRef ExtSymDefinedIn, + StringRef CategoryContextExtSymbolDefinedIn) { + generateUSRForObjCClass(cls, Out, ExtSymDefinedIn, + CategoryContextExtSymbolDefinedIn); } /// Generate a USR for an Objective-C class category. - void GenObjCCategory(StringRef cls, StringRef cat) { - generateUSRForObjCCategory(cls, cat, Out); + void GenObjCCategory(StringRef cls, StringRef cat, + StringRef clsExt, StringRef catExt) { + generateUSRForObjCCategory(cls, cat, Out, clsExt, catExt); } /// Generate a USR fragment for an Objective-C property. @@ -142,8 +157,8 @@ public: } /// Generate a USR for an Objective-C protocol. - void GenObjCProtocol(StringRef prot) { - generateUSRForObjCProtocol(prot, Out); + void GenObjCProtocol(StringRef prot, StringRef ext) { + generateUSRForObjCProtocol(prot, Out, ext); } void VisitType(QualType T); @@ -204,7 +219,11 @@ void USRGenerator::VisitFunctionDecl(const FunctionDecl *D) { if (ShouldGenerateLocation(D) && GenLoc(D, /*IncludeOffset=*/isLocal(D))) return; + const unsigned StartSize = Buf.size(); VisitDeclContext(D->getDeclContext()); + if (Buf.size() == StartSize) + GenExtSymbolContainer(D); + bool IsTemplate = false; if (FunctionTemplateDecl *FunTmpl = D->getDescribedFunctionTemplate()) { IsTemplate = true; @@ -367,7 +386,16 @@ void USRGenerator::VisitObjCMethodDecl(const ObjCMethodDecl *D) { IgnoreResults = true; return; } - Visit(ID); + auto getCategoryContext = [](const ObjCMethodDecl *D) -> + const ObjCCategoryDecl * { + if (auto *CD = dyn_cast(D->getDeclContext())) + return CD; + if (auto *ICD = dyn_cast(D->getDeclContext())) + return ICD->getCategoryDecl(); + return nullptr; + }; + auto *CD = getCategoryContext(D); + VisitObjCContainerDecl(ID, CD); } // Ideally we would use 'GenObjCMethod', but 
this is such a hot path // for Objective-C code that we don't want to use @@ -376,13 +404,15 @@ void USRGenerator::VisitObjCMethodDecl(const ObjCMethodDecl *D) { << DeclarationName(D->getSelector()); } -void USRGenerator::VisitObjCContainerDecl(const ObjCContainerDecl *D) { +void USRGenerator::VisitObjCContainerDecl(const ObjCContainerDecl *D, + const ObjCCategoryDecl *CatD) { switch (D->getKind()) { default: llvm_unreachable("Invalid ObjC container."); case Decl::ObjCInterface: case Decl::ObjCImplementation: - GenObjCClass(D->getName()); + GenObjCClass(D->getName(), GetExternalSourceContainer(D), + GetExternalSourceContainer(CatD)); break; case Decl::ObjCCategory: { const ObjCCategoryDecl *CD = cast(D); @@ -402,7 +432,9 @@ void USRGenerator::VisitObjCContainerDecl(const ObjCContainerDecl *D) { GenLoc(CD, /*IncludeOffset=*/true); } else - GenObjCCategory(ID->getName(), CD->getName()); + GenObjCCategory(ID->getName(), CD->getName(), + GetExternalSourceContainer(ID), + GetExternalSourceContainer(CD)); break; } @@ -417,12 +449,16 @@ void USRGenerator::VisitObjCContainerDecl(const ObjCContainerDecl *D) { IgnoreResults = true; return; } - GenObjCCategory(ID->getName(), CD->getName()); + GenObjCCategory(ID->getName(), CD->getName(), + GetExternalSourceContainer(ID), + GetExternalSourceContainer(CD)); break; } - case Decl::ObjCProtocol: - GenObjCProtocol(cast(D)->getName()); + case Decl::ObjCProtocol: { + const ObjCProtocolDecl *PD = cast(D); + GenObjCProtocol(PD->getName(), GetExternalSourceContainer(PD)); break; + } } } @@ -452,6 +488,8 @@ void USRGenerator::VisitTagDecl(const TagDecl *D) { ShouldGenerateLocation(D) && GenLoc(D, /*IncludeOffset=*/isLocal(D))) return; + GenExtSymbolContainer(D); + D = D->getCanonicalDecl(); VisitDeclContext(D->getDeclContext()); @@ -544,6 +582,12 @@ void USRGenerator::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D) { GenLoc(D, /*IncludeOffset=*/true); } +void USRGenerator::GenExtSymbolContainer(const NamedDecl *D) { + StringRef 
Container = GetExternalSourceContainer(D); + if (!Container.empty()) + Out << "@M@" << Container; +} + bool USRGenerator::GenLoc(const Decl *D, bool IncludeOffset) { if (generatedLoc) return IgnoreResults; @@ -866,12 +910,34 @@ void USRGenerator::VisitTemplateArgument(const TemplateArgument &Arg) { // USR generation functions. //===----------------------------------------------------------------------===// -void clang::index::generateUSRForObjCClass(StringRef Cls, raw_ostream &OS) { +static void combineClassAndCategoryExtContainers(StringRef ClsSymDefinedIn, + StringRef CatSymDefinedIn, + raw_ostream &OS) { + if (ClsSymDefinedIn.empty() && CatSymDefinedIn.empty()) + return; + if (CatSymDefinedIn.empty()) { + OS << "@M@" << ClsSymDefinedIn << '@'; + return; + } + OS << "@CM@" << CatSymDefinedIn << '@'; + if (ClsSymDefinedIn != CatSymDefinedIn) { + OS << ClsSymDefinedIn << '@'; + } +} + +void clang::index::generateUSRForObjCClass(StringRef Cls, raw_ostream &OS, + StringRef ExtSymDefinedIn, + StringRef CategoryContextExtSymbolDefinedIn) { + combineClassAndCategoryExtContainers(ExtSymDefinedIn, + CategoryContextExtSymbolDefinedIn, OS); OS << "objc(cs)" << Cls; } void clang::index::generateUSRForObjCCategory(StringRef Cls, StringRef Cat, - raw_ostream &OS) { + raw_ostream &OS, + StringRef ClsSymDefinedIn, + StringRef CatSymDefinedIn) { + combineClassAndCategoryExtContainers(ClsSymDefinedIn, CatSymDefinedIn, OS); OS << "objc(cy)" << Cls << '@' << Cat; } @@ -890,10 +956,25 @@ void clang::index::generateUSRForObjCProperty(StringRef Prop, bool isClassProp, OS << (isClassProp ? 
"(cpy)" : "(py)") << Prop; } -void clang::index::generateUSRForObjCProtocol(StringRef Prot, raw_ostream &OS) { +void clang::index::generateUSRForObjCProtocol(StringRef Prot, raw_ostream &OS, + StringRef ExtSymDefinedIn) { + if (!ExtSymDefinedIn.empty()) + OS << "@M@" << ExtSymDefinedIn << '@'; OS << "objc(pl)" << Prot; } +void clang::index::generateUSRForGlobalEnum(StringRef EnumName, raw_ostream &OS, + StringRef ExtSymDefinedIn) { + if (!ExtSymDefinedIn.empty()) + OS << "@M@" << ExtSymDefinedIn; + OS << "@E@" << EnumName; +} + +void clang::index::generateUSRForEnumConstant(StringRef EnumConstantName, + raw_ostream &OS) { + OS << '@' << EnumConstantName; +} + bool clang::index::generateUSRForDecl(const Decl *D, SmallVectorImpl &Buf) { if (!D) diff --git a/lib/Lex/ModuleMap.cpp b/lib/Lex/ModuleMap.cpp index 5b60ed3..512d7dc 100644 --- a/lib/Lex/ModuleMap.cpp +++ b/lib/Lex/ModuleMap.cpp @@ -581,6 +581,7 @@ Module *ModuleMap::createModuleForInterfaceUnit(SourceLocation Loc, auto *Result = new Module(Name, Loc, nullptr, /*IsFramework*/ false, /*IsExplicit*/ false, NumCreatedModules++); + Result->Kind = Module::ModuleInterfaceUnit; Modules[Name] = SourceModule = Result; // Mark the main source file as being within the newly-created module so that diff --git a/lib/Lex/Pragma.cpp b/lib/Lex/Pragma.cpp index f81eaa3..87e105d 100644 --- a/lib/Lex/Pragma.cpp +++ b/lib/Lex/Pragma.cpp @@ -989,9 +989,9 @@ struct PragmaDebugHandler : public PragmaHandler { #ifdef _MSC_VER #pragma warning(disable : 4717) #endif - static void DebugOverflowStack() { - void (*volatile Self)() = DebugOverflowStack; - Self(); + static void DebugOverflowStack(void (*P)() = nullptr) { + void (*volatile Self)(void(*P)()) = DebugOverflowStack; + Self(reinterpret_cast(Self)); } #ifdef _MSC_VER #pragma warning(default : 4717) diff --git a/lib/Parse/ParseExpr.cpp b/lib/Parse/ParseExpr.cpp index ad45ab1..3e02e46 100644 --- a/lib/Parse/ParseExpr.cpp +++ b/lib/Parse/ParseExpr.cpp @@ -339,7 +339,7 @@ 
Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) { ColonLoc = Tok.getLocation(); } } - + // Code completion for the right-hand side of an assignment expression // goes through a special hook that takes the left-hand side into account. if (Tok.is(tok::code_completion) && NextTokPrec == prec::Assignment) { @@ -347,7 +347,7 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) { cutOffParsing(); return ExprError(); } - + // Parse another leaf here for the RHS of the operator. // ParseCastExpression works here because all RHS expressions in C have it // as a prefix, at least. However, in C++, an assignment-expression could @@ -456,6 +456,7 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) { if (!getLangOpts().CPlusPlus) continue; } + // Ensure potential typos aren't left undiagnosed. if (LHS.isInvalid()) { Actions.CorrectDelayedTyposInExpr(OrigLHS); diff --git a/lib/Parse/Parser.cpp b/lib/Parse/Parser.cpp index edbfc63..38476ce 100644 --- a/lib/Parse/Parser.cpp +++ b/lib/Parse/Parser.cpp @@ -534,23 +534,6 @@ void Parser::LateTemplateParserCleanupCallback(void *P) { } bool Parser::ParseFirstTopLevelDecl(DeclGroupPtrTy &Result) { - // C++ Modules TS: module-declaration must be the first declaration in the - // file. (There can be no preceding preprocessor directives, but we expect - // the lexer to check that.) - if (Tok.is(tok::kw_module)) { - Result = ParseModuleDecl(); - return false; - } else if (getLangOpts().getCompilingModule() == - LangOptions::CMK_ModuleInterface) { - // FIXME: We avoid providing this diagnostic when generating an object file - // from an existing PCM file. This is not a good way to detect this - // condition; we should provide a mechanism to indicate whether we've - // already parsed a declaration in this translation unit and avoid calling - // ParseFirstTopLevelDecl in that case. 
- if (Actions.TUKind == TU_Module) - Diag(Tok, diag::err_expected_module_interface_decl); - } - // C11 6.9p1 says translation units must have at least one top-level // declaration. C++ doesn't have this restriction. We also don't want to // complain if we have a precompiled header, although technically if the PCH @@ -583,6 +566,14 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result) { Result = ParseModuleImport(SourceLocation()); return false; + case tok::kw_export: + if (NextToken().isNot(tok::kw_module)) + break; + LLVM_FALLTHROUGH; + case tok::kw_module: + Result = ParseModuleDecl(); + return false; + case tok::annot_module_include: Actions.ActOnModuleInclude(Tok.getLocation(), reinterpret_cast( @@ -2049,30 +2040,28 @@ void Parser::ParseMicrosoftIfExistsExternalDeclaration() { /// Parse a C++ Modules TS module declaration, which appears at the beginning /// of a module interface, module partition, or module implementation file. /// -/// module-declaration: [Modules TS + P0273R0] -/// 'module' module-kind[opt] module-name attribute-specifier-seq[opt] ';' -/// module-kind: -/// 'implementation' -/// 'partition' +/// module-declaration: [Modules TS + P0273R0 + P0629R0] +/// 'export'[opt] 'module' 'partition'[opt] +/// module-name attribute-specifier-seq[opt] ';' /// -/// Note that the module-kind values are context-sensitive keywords. +/// Note that 'partition' is a context-sensitive keyword. Parser::DeclGroupPtrTy Parser::ParseModuleDecl() { - assert(Tok.is(tok::kw_module) && getLangOpts().ModulesTS && - "should not be parsing a module declaration"); + SourceLocation StartLoc = Tok.getLocation(); + + Sema::ModuleDeclKind MDK = TryConsumeToken(tok::kw_export) + ? Sema::ModuleDeclKind::Module + : Sema::ModuleDeclKind::Implementation; + + assert(Tok.is(tok::kw_module) && "not a module declaration"); SourceLocation ModuleLoc = ConsumeToken(); - // Check for a module-kind. 
- Sema::ModuleDeclKind MDK = Sema::ModuleDeclKind::Module; - if (Tok.is(tok::identifier) && NextToken().is(tok::identifier)) { - if (Tok.getIdentifierInfo()->isStr("implementation")) - MDK = Sema::ModuleDeclKind::Implementation; - else if (Tok.getIdentifierInfo()->isStr("partition")) - MDK = Sema::ModuleDeclKind::Partition; - else { - Diag(Tok, diag::err_unexpected_module_kind) << Tok.getIdentifierInfo(); - SkipUntil(tok::semi); - return nullptr; - } + if (Tok.is(tok::identifier) && NextToken().is(tok::identifier) && + Tok.getIdentifierInfo()->isStr("partition")) { + // If 'partition' is present, this must be a module interface unit. + if (MDK != Sema::ModuleDeclKind::Module) + Diag(Tok.getLocation(), diag::err_module_implementation_partition) + << FixItHint::CreateInsertion(ModuleLoc, "export "); + MDK = Sema::ModuleDeclKind::Partition; ConsumeToken(); } @@ -2080,14 +2069,14 @@ Parser::DeclGroupPtrTy Parser::ParseModuleDecl() { if (ParseModuleName(ModuleLoc, Path, /*IsImport*/false)) return nullptr; + // We don't support any module attributes yet; just parse them and diagnose. ParsedAttributesWithRange Attrs(AttrFactory); MaybeParseCXX11Attributes(Attrs); - // We don't support any module attributes yet. ProhibitCXX11Attributes(Attrs, diag::err_attribute_not_module_attr); ExpectAndConsumeSemi(diag::err_module_expected_semi); - return Actions.ActOnModuleDecl(ModuleLoc, MDK, Path); + return Actions.ActOnModuleDecl(StartLoc, ModuleLoc, MDK, Path); } /// Parse a module import declaration. This is essentially the same for diff --git a/lib/Sema/Sema.cpp b/lib/Sema/Sema.cpp index 950f040..94e792c 100644 --- a/lib/Sema/Sema.cpp +++ b/lib/Sema/Sema.cpp @@ -1161,10 +1161,14 @@ void Sema::PushFunctionScope() { // memory for a new scope. 
FunctionScopes.back()->Clear(); FunctionScopes.push_back(FunctionScopes.back()); + if (LangOpts.OpenMP) + pushOpenMPFunctionRegion(); return; } FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics())); + if (LangOpts.OpenMP) + pushOpenMPFunctionRegion(); } void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) { @@ -1192,6 +1196,9 @@ void Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP, FunctionScopeInfo *Scope = FunctionScopes.pop_back_val(); assert(!FunctionScopes.empty() && "mismatched push/pop!"); + if (LangOpts.OpenMP) + popOpenMPFunctionRegion(Scope); + // Issue any analysis-based warnings. if (WP && D) AnalysisWarnings.IssueWarnings(*WP, Scope, D, blkExpr); diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp index 45523b3..044ec74 100644 --- a/lib/Sema/SemaChecking.cpp +++ b/lib/Sema/SemaChecking.cpp @@ -408,7 +408,7 @@ static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) { } // Third argument is always an ndrange_t type. - if (Arg2->getType().getAsString() != "ndrange_t") { + if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") { S.Diag(TheCall->getArg(2)->getLocStart(), diag::err_opencl_enqueue_kernel_expected_type) << "'ndrange_t'"; diff --git a/lib/Sema/SemaDecl.cpp b/lib/Sema/SemaDecl.cpp index f3ffcf5..f838c9a 100644 --- a/lib/Sema/SemaDecl.cpp +++ b/lib/Sema/SemaDecl.cpp @@ -615,7 +615,7 @@ bool Sema::isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S) { CXXRecordDecl *RD = cast(CurContext); for (const auto &Base : RD->bases()) - if (Context.hasSameUnqualifiedType(QualType(Ty, 1), Base.getType())) + if (Ty && Context.hasSameUnqualifiedType(QualType(Ty, 1), Base.getType())) return true; return S->isFunctionPrototypeScope(); } @@ -13523,7 +13523,8 @@ CreateNewDecl: // If this is an undefined enum, warn. 
if (TUK != TUK_Definition && !Invalid) { TagDecl *Def; - if ((getLangOpts().CPlusPlus11 || getLangOpts().ObjC2) && + if (!EnumUnderlyingIsImplicit && + (getLangOpts().CPlusPlus11 || getLangOpts().ObjC2) && cast(New)->isFixed()) { // C++0x: 7.2p2: opaque-enum-declaration. // Conflicts are diagnosed above. Do nothing. @@ -15687,30 +15688,41 @@ static void checkModuleImportContext(Sema &S, Module *M, } } -Sema::DeclGroupPtrTy Sema::ActOnModuleDecl(SourceLocation ModuleLoc, +Sema::DeclGroupPtrTy Sema::ActOnModuleDecl(SourceLocation StartLoc, + SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path) { - // 'module implementation' requires that we are not compiling a module of any - // kind. 'module' and 'module partition' require that we are compiling a - // module inteface (not a module map). - auto CMK = getLangOpts().getCompilingModule(); - if (MDK == ModuleDeclKind::Implementation - ? CMK != LangOptions::CMK_None - : CMK != LangOptions::CMK_ModuleInterface) { + // A module implementation unit requires that we are not compiling a module + // of any kind. A module interface unit requires that we are not compiling a + // module map. + switch (getLangOpts().getCompilingModule()) { + case LangOptions::CMK_None: + // It's OK to compile a module interface as a normal translation unit. + break; + + case LangOptions::CMK_ModuleInterface: + if (MDK != ModuleDeclKind::Implementation) + break; + + // We were asked to compile a module interface unit but this is a module + // implementation unit. That indicates the 'export' is missing. Diag(ModuleLoc, diag::err_module_interface_implementation_mismatch) - << (unsigned)MDK; + << FixItHint::CreateInsertion(ModuleLoc, "export "); + break; + + case LangOptions::CMK_ModuleMap: + Diag(ModuleLoc, diag::err_module_decl_in_module_map_module); return nullptr; } // FIXME: Create a ModuleDecl and return it. 
// FIXME: Most of this work should be done by the preprocessor rather than - // here, in case we look ahead across something where the current - // module matters (eg a #include). + // here, in order to support macro import. - // The dots in a module name in the Modules TS are a lie. Unlike Clang's - // hierarchical module map modules, the dots here are just another character - // that can appear in a module name. Flatten down to the actual module name. + // Flatten the dots in a module name. Unlike Clang's hierarchical module map + // modules, the dots here are just another character that can appear in a + // module name. std::string ModuleName; for (auto &Piece : Path) { if (!ModuleName.empty()) @@ -15735,8 +15747,8 @@ Sema::DeclGroupPtrTy Sema::ActOnModuleDecl(SourceLocation ModuleLoc, case ModuleDeclKind::Module: { // FIXME: Check we're not in a submodule. - // We can't have imported a definition of this module or parsed a module - // map defining it already. + // We can't have parsed or imported a definition of this module or parsed a + // module map defining it already. if (auto *M = Map.findModule(ModuleName)) { Diag(Path[0].second, diag::err_module_redefinition) << ModuleName; if (M->DefinitionLoc.isValid()) @@ -15910,6 +15922,12 @@ Decl *Sema::ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, ExportDecl *D = ExportDecl::Create(Context, CurContext, ExportLoc); // C++ Modules TS draft: + // An export-declaration shall appear in the purview of a module other than + // the global module. + if (ModuleScopes.empty() || !ModuleScopes.back().Module || + ModuleScopes.back().Module->Kind != Module::ModuleInterfaceUnit) + Diag(ExportLoc, diag::err_export_not_in_module_interface); + // An export-declaration [...] shall not contain more than one // export keyword. 
// diff --git a/lib/Sema/SemaDeclAttr.cpp b/lib/Sema/SemaDeclAttr.cpp index 027b3fe..17c6975 100644 --- a/lib/Sema/SemaDeclAttr.cpp +++ b/lib/Sema/SemaDeclAttr.cpp @@ -7220,6 +7220,8 @@ void Sema::DiagnoseUnguardedAvailabilityViolations(Decl *D) { Body = FD->getBody(); } else if (auto *MD = dyn_cast(D)) Body = MD->getBody(); + else if (auto *BD = dyn_cast(D)) + Body = BD->getBody(); assert(Body && "Need a body here!"); diff --git a/lib/Sema/SemaExpr.cpp b/lib/Sema/SemaExpr.cpp index 5a56f70..00480f8 100644 --- a/lib/Sema/SemaExpr.cpp +++ b/lib/Sema/SemaExpr.cpp @@ -171,9 +171,14 @@ DiagnoseAvailabilityOfDecl(Sema &S, NamedDecl *D, SourceLocation Loc, if (AvailabilityResult Result = S.ShouldDiagnoseAvailabilityOfDecl(D, &Message)) { - if (Result == AR_NotYetIntroduced && S.getCurFunctionOrMethodDecl()) { - S.getEnclosingFunction()->HasPotentialAvailabilityViolations = true; - return; + if (Result == AR_NotYetIntroduced) { + if (S.getCurFunctionOrMethodDecl()) { + S.getEnclosingFunction()->HasPotentialAvailabilityViolations = true; + return; + } else if (S.getCurBlock() || S.getCurLambda()) { + S.getCurFunction()->HasPotentialAvailabilityViolations = true; + return; + } } const ObjCPropertyDecl *ObjCPDecl = nullptr; @@ -12498,6 +12503,9 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc, BSI->TheDecl->setBody(cast(Body)); + if (Body && getCurFunction()->HasPotentialAvailabilityViolations) + DiagnoseUnguardedAvailabilityViolations(BSI->TheDecl); + // Try to apply the named return value optimization. We have to check again // if we can do this, though, because blocks keep return statements around // to deduce an implicit return type. 
diff --git a/lib/Sema/SemaLookup.cpp b/lib/Sema/SemaLookup.cpp index e4d420f..ce76e14 100644 --- a/lib/Sema/SemaLookup.cpp +++ b/lib/Sema/SemaLookup.cpp @@ -1326,12 +1326,6 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) { return !R.empty(); } -/// \brief Find the declaration that a class temploid member specialization was -/// instantiated from, or the member itself if it is an explicit specialization. -static Decl *getInstantiatedFrom(Decl *D, MemberSpecializationInfo *MSInfo) { - return MSInfo->isExplicitSpecialization() ? D : MSInfo->getInstantiatedFrom(); -} - Module *Sema::getOwningModule(Decl *Entity) { // If it's imported, grab its owning module. Module *M = Entity->getImportedOwningModule(); @@ -1413,12 +1407,11 @@ static Module *getDefiningModule(Sema &S, Decl *Entity) { if (CXXRecordDecl *Pattern = RD->getTemplateInstantiationPattern()) Entity = Pattern; } else if (EnumDecl *ED = dyn_cast(Entity)) { - if (MemberSpecializationInfo *MSInfo = ED->getMemberSpecializationInfo()) - Entity = getInstantiatedFrom(ED, MSInfo); + if (auto *Pattern = ED->getTemplateInstantiationPattern()) + Entity = Pattern; } else if (VarDecl *VD = dyn_cast(Entity)) { - // FIXME: Map from variable template specializations back to the template. - if (MemberSpecializationInfo *MSInfo = VD->getMemberSpecializationInfo()) - Entity = getInstantiatedFrom(VD, MSInfo); + if (VarDecl *Pattern = VD->getTemplateInstantiationPattern()) + Entity = Pattern; } // Walk up to the containing context. That might also have been instantiated diff --git a/lib/Sema/SemaOpenMP.cpp b/lib/Sema/SemaOpenMP.cpp index d7a6d2e..fb13669 100644 --- a/lib/Sema/SemaOpenMP.cpp +++ b/lib/Sema/SemaOpenMP.cpp @@ -118,7 +118,9 @@ private: typedef SmallVector StackTy; /// \brief Stack of used declaration and their data-sharing attributes. 
- StackTy Stack; + DeclSAMapTy Threadprivates; + const FunctionScopeInfo *CurrentNonCapturingFunctionScope = nullptr; + SmallVector, 4> Stack; /// \brief true, if check for DSA must be from parent directive, false, if /// from current directive. OpenMPClauseKind ClauseKindMode = OMPC_unknown; @@ -133,8 +135,14 @@ private: /// \brief Checks if the variable is a local for OpenMP region. bool isOpenMPLocal(VarDecl *D, StackTy::reverse_iterator Iter); + bool isStackEmpty() const { + return Stack.empty() || + Stack.back().second != CurrentNonCapturingFunctionScope || + Stack.back().first.empty(); + } + public: - explicit DSAStackTy(Sema &S) : Stack(1), SemaRef(S) {} + explicit DSAStackTy(Sema &S) : SemaRef(S) {} bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; } void setClauseParsingMode(OpenMPClauseKind K) { ClauseKindMode = K; } @@ -144,13 +152,38 @@ public: void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc) { - Stack.push_back(SharingMapTy(DKind, DirName, CurScope, Loc)); - Stack.back().DefaultAttrLoc = Loc; + if (Stack.empty() || + Stack.back().second != CurrentNonCapturingFunctionScope) + Stack.emplace_back(StackTy(), CurrentNonCapturingFunctionScope); + Stack.back().first.emplace_back(DKind, DirName, CurScope, Loc); + Stack.back().first.back().DefaultAttrLoc = Loc; } void pop() { - assert(Stack.size() > 1 && "Data-sharing attributes stack is empty!"); - Stack.pop_back(); + assert(!Stack.back().first.empty() && + "Data-sharing attributes stack is empty!"); + Stack.back().first.pop_back(); + } + + /// Start new OpenMP region stack in new non-capturing function. + void pushFunction() { + const FunctionScopeInfo *CurFnScope = SemaRef.getCurFunction(); + assert(!isa(CurFnScope)); + CurrentNonCapturingFunctionScope = CurFnScope; + } + /// Pop region stack for non-capturing function. 
+ void popFunction(const FunctionScopeInfo *OldFSI) { + if (!Stack.empty() && Stack.back().second == OldFSI) { + assert(Stack.back().first.empty()); + Stack.pop_back(); + } + CurrentNonCapturingFunctionScope = nullptr; + for (const FunctionScopeInfo *FSI : llvm::reverse(SemaRef.FunctionScopes)) { + if (!isa(FSI)) { + CurrentNonCapturingFunctionScope = FSI; + break; + } + } } void addCriticalWithHint(OMPCriticalDirective *D, llvm::APSInt Hint) { @@ -229,31 +262,35 @@ public: /// \brief Returns currently analyzed directive. OpenMPDirectiveKind getCurrentDirective() const { - return Stack.back().Directive; + return isStackEmpty() ? OMPD_unknown : Stack.back().first.back().Directive; } /// \brief Returns parent directive. OpenMPDirectiveKind getParentDirective() const { - if (Stack.size() > 2) - return Stack[Stack.size() - 2].Directive; - return OMPD_unknown; + if (isStackEmpty() || Stack.back().first.size() == 1) + return OMPD_unknown; + return std::next(Stack.back().first.rbegin())->Directive; } /// \brief Set default data sharing attribute to none. void setDefaultDSANone(SourceLocation Loc) { - Stack.back().DefaultAttr = DSA_none; - Stack.back().DefaultAttrLoc = Loc; + assert(!isStackEmpty()); + Stack.back().first.back().DefaultAttr = DSA_none; + Stack.back().first.back().DefaultAttrLoc = Loc; } /// \brief Set default data sharing attribute to shared. void setDefaultDSAShared(SourceLocation Loc) { - Stack.back().DefaultAttr = DSA_shared; - Stack.back().DefaultAttrLoc = Loc; + assert(!isStackEmpty()); + Stack.back().first.back().DefaultAttr = DSA_shared; + Stack.back().first.back().DefaultAttrLoc = Loc; } DefaultDataSharingAttributes getDefaultDSA() const { - return Stack.back().DefaultAttr; + return isStackEmpty() ? DSA_unspecified + : Stack.back().first.back().DefaultAttr; } SourceLocation getDefaultDSALocation() const { - return Stack.back().DefaultAttrLoc; + return isStackEmpty() ? 
SourceLocation() + : Stack.back().first.back().DefaultAttrLoc; } /// \brief Checks if the specified variable is a threadprivate. @@ -264,52 +301,64 @@ public: /// \brief Marks current region as ordered (it has an 'ordered' clause). void setOrderedRegion(bool IsOrdered, Expr *Param) { - Stack.back().OrderedRegion.setInt(IsOrdered); - Stack.back().OrderedRegion.setPointer(Param); + assert(!isStackEmpty()); + Stack.back().first.back().OrderedRegion.setInt(IsOrdered); + Stack.back().first.back().OrderedRegion.setPointer(Param); } /// \brief Returns true, if parent region is ordered (has associated /// 'ordered' clause), false - otherwise. bool isParentOrderedRegion() const { - if (Stack.size() > 2) - return Stack[Stack.size() - 2].OrderedRegion.getInt(); - return false; + if (isStackEmpty() || Stack.back().first.size() == 1) + return false; + return std::next(Stack.back().first.rbegin())->OrderedRegion.getInt(); } /// \brief Returns optional parameter for the ordered region. Expr *getParentOrderedRegionParam() const { - if (Stack.size() > 2) - return Stack[Stack.size() - 2].OrderedRegion.getPointer(); - return nullptr; + if (isStackEmpty() || Stack.back().first.size() == 1) + return nullptr; + return std::next(Stack.back().first.rbegin())->OrderedRegion.getPointer(); } /// \brief Marks current region as nowait (it has a 'nowait' clause). void setNowaitRegion(bool IsNowait = true) { - Stack.back().NowaitRegion = IsNowait; + assert(!isStackEmpty()); + Stack.back().first.back().NowaitRegion = IsNowait; } /// \brief Returns true, if parent region is nowait (has associated /// 'nowait' clause), false - otherwise. bool isParentNowaitRegion() const { - if (Stack.size() > 2) - return Stack[Stack.size() - 2].NowaitRegion; - return false; + if (isStackEmpty() || Stack.back().first.size() == 1) + return false; + return std::next(Stack.back().first.rbegin())->NowaitRegion; } /// \brief Marks parent region as cancel region. 
void setParentCancelRegion(bool Cancel = true) { - if (Stack.size() > 2) - Stack[Stack.size() - 2].CancelRegion = - Stack[Stack.size() - 2].CancelRegion || Cancel; + if (!isStackEmpty() && Stack.back().first.size() > 1) { + auto &StackElemRef = *std::next(Stack.back().first.rbegin()); + StackElemRef.CancelRegion |= StackElemRef.CancelRegion || Cancel; + } } /// \brief Return true if current region has inner cancel construct. - bool isCancelRegion() const { return Stack.back().CancelRegion; } + bool isCancelRegion() const { + return isStackEmpty() ? false : Stack.back().first.back().CancelRegion; + } /// \brief Set collapse value for the region. - void setAssociatedLoops(unsigned Val) { Stack.back().AssociatedLoops = Val; } + void setAssociatedLoops(unsigned Val) { + assert(!isStackEmpty()); + Stack.back().first.back().AssociatedLoops = Val; + } /// \brief Return collapse value for region. - unsigned getAssociatedLoops() const { return Stack.back().AssociatedLoops; } + unsigned getAssociatedLoops() const { + return isStackEmpty() ? 0 : Stack.back().first.back().AssociatedLoops; + } /// \brief Marks current target region as one with closely nested teams /// region. void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) { - if (Stack.size() > 2) - Stack[Stack.size() - 2].InnerTeamsRegionLoc = TeamsRegionLoc; + if (!isStackEmpty() && Stack.back().first.size() > 1) { + std::next(Stack.back().first.rbegin())->InnerTeamsRegionLoc = + TeamsRegionLoc; + } } /// \brief Returns true, if current region has closely nested teams region. bool hasInnerTeamsRegion() const { @@ -317,14 +366,20 @@ public: } /// \brief Returns location of the nested teams region (if any). SourceLocation getInnerTeamsRegionLoc() const { - if (Stack.size() > 1) - return Stack.back().InnerTeamsRegionLoc; - return SourceLocation(); + return isStackEmpty() ? 
SourceLocation() + : Stack.back().first.back().InnerTeamsRegionLoc; } - Scope *getCurScope() const { return Stack.back().CurScope; } - Scope *getCurScope() { return Stack.back().CurScope; } - SourceLocation getConstructLoc() { return Stack.back().ConstructLoc; } + Scope *getCurScope() const { + return isStackEmpty() ? nullptr : Stack.back().first.back().CurScope; + } + Scope *getCurScope() { + return isStackEmpty() ? nullptr : Stack.back().first.back().CurScope; + } + SourceLocation getConstructLoc() { + return isStackEmpty() ? SourceLocation() + : Stack.back().first.back().ConstructLoc; + } /// Do the check specified in \a Check to all component lists and return true /// if any issue is found. @@ -333,8 +388,10 @@ public: const llvm::function_ref< bool(OMPClauseMappableExprCommon::MappableExprComponentListRef, OpenMPClauseKind)> &Check) { - auto SI = Stack.rbegin(); - auto SE = Stack.rend(); + if (isStackEmpty()) + return false; + auto SI = Stack.back().first.rbegin(); + auto SE = Stack.back().first.rend(); if (SI == SE) return false; @@ -361,9 +418,9 @@ public: ValueDecl *VD, OMPClauseMappableExprCommon::MappableExprComponentListRef Components, OpenMPClauseKind WhereFoundClauseKind) { - assert(Stack.size() > 1 && + assert(!isStackEmpty() && "Not expecting to retrieve components from a empty stack!"); - auto &MEC = Stack.back().MappedExprComponents[VD]; + auto &MEC = Stack.back().first.back().MappedExprComponents[VD]; // Create new entry and append the new components there. 
MEC.Components.resize(MEC.Components.size() + 1); MEC.Components.back().append(Components.begin(), Components.end()); @@ -371,23 +428,25 @@ public: } unsigned getNestingLevel() const { - assert(Stack.size() > 1); - return Stack.size() - 2; + assert(!isStackEmpty()); + return Stack.back().first.size() - 1; } void addDoacrossDependClause(OMPDependClause *C, OperatorOffsetTy &OpsOffs) { - assert(Stack.size() > 2); - assert(isOpenMPWorksharingDirective(Stack[Stack.size() - 2].Directive)); - Stack[Stack.size() - 2].DoacrossDepends.insert({C, OpsOffs}); + assert(!isStackEmpty() && Stack.back().first.size() > 1); + auto &StackElem = *std::next(Stack.back().first.rbegin()); + assert(isOpenMPWorksharingDirective(StackElem.Directive)); + StackElem.DoacrossDepends.insert({C, OpsOffs}); } llvm::iterator_range getDoacrossDependClauses() const { - assert(Stack.size() > 1); - if (isOpenMPWorksharingDirective(Stack[Stack.size() - 1].Directive)) { - auto &Ref = Stack[Stack.size() - 1].DoacrossDepends; + assert(!isStackEmpty()); + auto &StackElem = Stack.back().first.back(); + if (isOpenMPWorksharingDirective(StackElem.Directive)) { + auto &Ref = StackElem.DoacrossDepends; return llvm::make_range(Ref.begin(), Ref.end()); } - return llvm::make_range(Stack[0].DoacrossDepends.end(), - Stack[0].DoacrossDepends.end()); + return llvm::make_range(StackElem.DoacrossDepends.end(), + StackElem.DoacrossDepends.end()); } }; bool isParallelOrTaskRegion(OpenMPDirectiveKind DKind) { @@ -416,7 +475,7 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator &Iter, auto *VD = dyn_cast(D); auto *FD = dyn_cast(D); DSAVarData DVar; - if (Iter == std::prev(Stack.rend())) { + if (isStackEmpty() || Iter == Stack.back().first.rend()) { // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced // in a region but not in construct] // File-scope or namespace-scope variables referenced in called routines @@ -490,8 +549,9 @@ DSAStackTy::DSAVarData 
DSAStackTy::getDSA(StackTy::reverse_iterator &Iter, // bound to the current team is shared. if (isOpenMPTaskingDirective(DVar.DKind)) { DSAVarData DVarTemp; - for (StackTy::reverse_iterator I = std::next(Iter), EE = Stack.rend(); - I != EE; ++I) { + auto I = Iter, E = Stack.back().first.rend(); + do { + ++I; // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables // Referenced in a Construct, implicitly determined, p.6] // In a task construct, if no default clause is present, a variable @@ -503,9 +563,7 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator &Iter, DVar.CKind = OMPC_firstprivate; return DVar; } - if (isParallelOrTaskRegion(I->Directive)) - break; - } + } while (I != E && !isParallelOrTaskRegion(I->Directive)); DVar.CKind = (DVarTemp.CKind == OMPC_unknown) ? OMPC_firstprivate : OMPC_shared; return DVar; @@ -520,12 +578,13 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator &Iter, } Expr *DSAStackTy::addUniqueAligned(ValueDecl *D, Expr *NewDE) { - assert(Stack.size() > 1 && "Data sharing attributes stack is empty"); + assert(!isStackEmpty() && "Data sharing attributes stack is empty"); D = getCanonicalDecl(D); - auto It = Stack.back().AlignedMap.find(D); - if (It == Stack.back().AlignedMap.end()) { + auto &StackElem = Stack.back().first.back(); + auto It = StackElem.AlignedMap.find(D); + if (It == StackElem.AlignedMap.end()) { assert(NewDE && "Unexpected nullptr expr to be added into aligned map"); - Stack.back().AlignedMap[D] = NewDE; + StackElem.AlignedMap[D] = NewDE; return nullptr; } else { assert(It->second && "Unexpected nullptr expr in the aligned map"); @@ -535,35 +594,43 @@ Expr *DSAStackTy::addUniqueAligned(ValueDecl *D, Expr *NewDE) { } void DSAStackTy::addLoopControlVariable(ValueDecl *D, VarDecl *Capture) { - assert(Stack.size() > 1 && "Data-sharing attributes stack is empty"); + assert(!isStackEmpty() && "Data-sharing attributes stack is empty"); D = getCanonicalDecl(D); - 
Stack.back().LCVMap.insert( - std::make_pair(D, LCDeclInfo(Stack.back().LCVMap.size() + 1, Capture))); + auto &StackElem = Stack.back().first.back(); + StackElem.LCVMap.insert( + {D, LCDeclInfo(StackElem.LCVMap.size() + 1, Capture)}); } DSAStackTy::LCDeclInfo DSAStackTy::isLoopControlVariable(ValueDecl *D) { - assert(Stack.size() > 1 && "Data-sharing attributes stack is empty"); + assert(!isStackEmpty() && "Data-sharing attributes stack is empty"); D = getCanonicalDecl(D); - return Stack.back().LCVMap.count(D) > 0 ? Stack.back().LCVMap[D] - : LCDeclInfo(0, nullptr); + auto &StackElem = Stack.back().first.back(); + auto It = StackElem.LCVMap.find(D); + if (It != StackElem.LCVMap.end()) + return It->second; + return {0, nullptr}; } DSAStackTy::LCDeclInfo DSAStackTy::isParentLoopControlVariable(ValueDecl *D) { - assert(Stack.size() > 2 && "Data-sharing attributes stack is empty"); + assert(!isStackEmpty() && Stack.back().first.size() > 1 && + "Data-sharing attributes stack is empty"); D = getCanonicalDecl(D); - return Stack[Stack.size() - 2].LCVMap.count(D) > 0 - ? 
Stack[Stack.size() - 2].LCVMap[D] - : LCDeclInfo(0, nullptr); + auto &StackElem = *std::next(Stack.back().first.rbegin()); + auto It = StackElem.LCVMap.find(D); + if (It != StackElem.LCVMap.end()) + return It->second; + return {0, nullptr}; } ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) { - assert(Stack.size() > 2 && "Data-sharing attributes stack is empty"); - if (Stack[Stack.size() - 2].LCVMap.size() < I) + assert(!isStackEmpty() && Stack.back().first.size() > 1 && + "Data-sharing attributes stack is empty"); + auto &StackElem = *std::next(Stack.back().first.rbegin()); + if (StackElem.LCVMap.size() < I) return nullptr; - for (auto &Pair : Stack[Stack.size() - 2].LCVMap) { + for (auto &Pair : StackElem.LCVMap) if (Pair.second.first == I) return Pair.first; - } return nullptr; } @@ -571,13 +638,13 @@ void DSAStackTy::addDSA(ValueDecl *D, Expr *E, OpenMPClauseKind A, DeclRefExpr *PrivateCopy) { D = getCanonicalDecl(D); if (A == OMPC_threadprivate) { - auto &Data = Stack[0].SharingMap[D]; + auto &Data = Threadprivates[D]; Data.Attributes = A; Data.RefExpr.setPointer(E); Data.PrivateCopy = nullptr; } else { - assert(Stack.size() > 1 && "Data-sharing attributes stack is empty"); - auto &Data = Stack.back().SharingMap[D]; + assert(!isStackEmpty() && "Data-sharing attributes stack is empty"); + auto &Data = Stack.back().first.back().SharingMap[D]; assert(Data.Attributes == OMPC_unknown || (A == Data.Attributes) || (A == OMPC_firstprivate && Data.Attributes == OMPC_lastprivate) || (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) || @@ -592,7 +659,7 @@ void DSAStackTy::addDSA(ValueDecl *D, Expr *E, OpenMPClauseKind A, Data.RefExpr.setPointerAndInt(E, IsLastprivate); Data.PrivateCopy = PrivateCopy; if (PrivateCopy) { - auto &Data = Stack.back().SharingMap[PrivateCopy->getDecl()]; + auto &Data = Stack.back().first.back().SharingMap[PrivateCopy->getDecl()]; Data.Attributes = A; Data.RefExpr.setPointerAndInt(PrivateCopy, IsLastprivate); 
Data.PrivateCopy = nullptr; @@ -602,19 +669,17 @@ void DSAStackTy::addDSA(ValueDecl *D, Expr *E, OpenMPClauseKind A, bool DSAStackTy::isOpenMPLocal(VarDecl *D, StackTy::reverse_iterator Iter) { D = D->getCanonicalDecl(); - if (Stack.size() > 2) { - reverse_iterator I = Iter, E = std::prev(Stack.rend()); + if (!isStackEmpty() && Stack.back().first.size() > 1) { + reverse_iterator I = Iter, E = Stack.back().first.rend(); Scope *TopScope = nullptr; - while (I != E && !isParallelOrTaskRegion(I->Directive)) { + while (I != E && !isParallelOrTaskRegion(I->Directive)) ++I; - } if (I == E) return false; TopScope = I->CurScope ? I->CurScope->getParent() : nullptr; Scope *CurScope = getCurScope(); - while (CurScope != TopScope && !CurScope->isDeclScope(D)) { + while (CurScope != TopScope && !CurScope->isDeclScope(D)) CurScope = CurScope->getParent(); - } return CurScope != TopScope; } return false; @@ -665,16 +730,16 @@ DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D, bool FromParent) { D->getLocation()), OMPC_threadprivate); } - if (Stack[0].SharingMap.count(D)) { - DVar.RefExpr = Stack[0].SharingMap[D].RefExpr.getPointer(); + auto TI = Threadprivates.find(D); + if (TI != Threadprivates.end()) { + DVar.RefExpr = TI->getSecond().RefExpr.getPointer(); DVar.CKind = OMPC_threadprivate; return DVar; } - if (Stack.size() == 1) { + if (isStackEmpty()) // Not in OpenMP execution region and top scope was already checked. return DVar; - } // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced // in a Construct, C/C++, predetermined, p.4] @@ -722,11 +787,10 @@ DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D, bool FromParent) { // Explicitly specified attributes and local variables with predetermined // attributes. 
- auto StartI = std::next(Stack.rbegin()); - auto EndI = std::prev(Stack.rend()); - if (FromParent && StartI != EndI) { + auto StartI = std::next(Stack.back().first.rbegin()); + auto EndI = Stack.back().first.rend(); + if (FromParent && StartI != EndI) StartI = std::next(StartI); - } auto I = std::prev(StartI); if (I->SharingMap.count(D)) { DVar.RefExpr = I->SharingMap[D].RefExpr.getPointer(); @@ -740,12 +804,15 @@ DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D, bool FromParent) { DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D, bool FromParent) { + if (isStackEmpty()) { + StackTy::reverse_iterator I; + return getDSA(I, D); + } D = getCanonicalDecl(D); - auto StartI = Stack.rbegin(); - auto EndI = std::prev(Stack.rend()); - if (FromParent && StartI != EndI) { + auto StartI = Stack.back().first.rbegin(); + auto EndI = Stack.back().first.rend(); + if (FromParent && StartI != EndI) StartI = std::next(StartI); - } return getDSA(StartI, D); } @@ -754,33 +821,40 @@ DSAStackTy::hasDSA(ValueDecl *D, const llvm::function_ref &CPred, const llvm::function_ref &DPred, bool FromParent) { + if (isStackEmpty()) + return {}; D = getCanonicalDecl(D); - auto StartI = std::next(Stack.rbegin()); - auto EndI = Stack.rend(); - if (FromParent && StartI != EndI) { + auto StartI = std::next(Stack.back().first.rbegin()); + auto EndI = Stack.back().first.rend(); + if (FromParent && StartI != EndI) StartI = std::next(StartI); - } - for (auto I = StartI, EE = EndI; I != EE; ++I) { + if (StartI == EndI) + return {}; + auto I = std::prev(StartI); + do { + ++I; if (!DPred(I->Directive) && !isParallelOrTaskRegion(I->Directive)) continue; DSAVarData DVar = getDSA(I, D); if (CPred(DVar.CKind)) return DVar; - } - return DSAVarData(); + } while (I != EndI); + return {}; } DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA( ValueDecl *D, const llvm::function_ref &CPred, const llvm::function_ref &DPred, bool FromParent) { + if (isStackEmpty()) + return {}; D = 
getCanonicalDecl(D); - auto StartI = std::next(Stack.rbegin()); - auto EndI = Stack.rend(); + auto StartI = std::next(Stack.back().first.rbegin()); + auto EndI = Stack.back().first.rend(); if (FromParent && StartI != EndI) StartI = std::next(StartI); if (StartI == EndI || !DPred(StartI->Directive)) - return DSAVarData(); + return {}; DSAVarData DVar = getDSA(StartI, D); return CPred(DVar.CKind) ? DVar : DSAVarData(); } @@ -790,9 +864,11 @@ bool DSAStackTy::hasExplicitDSA( unsigned Level, bool NotLastprivate) { if (CPred(ClauseKindMode)) return true; + if (isStackEmpty()) + return false; D = getCanonicalDecl(D); - auto StartI = std::next(Stack.begin()); - auto EndI = Stack.end(); + auto StartI = Stack.back().first.begin(); + auto EndI = Stack.back().first.end(); if (std::distance(StartI, EndI) <= (int)Level) return false; std::advance(StartI, Level); @@ -805,8 +881,10 @@ bool DSAStackTy::hasExplicitDSA( bool DSAStackTy::hasExplicitDirective( const llvm::function_ref &DPred, unsigned Level) { - auto StartI = std::next(Stack.begin()); - auto EndI = Stack.end(); + if (isStackEmpty()) + return false; + auto StartI = Stack.back().first.begin(); + auto EndI = Stack.back().first.end(); if (std::distance(StartI, EndI) <= (int)Level) return false; std::advance(StartI, Level); @@ -819,13 +897,12 @@ bool DSAStackTy::hasDirective( &DPred, bool FromParent) { // We look only in the enclosing region. 
- if (Stack.size() < 2) + if (isStackEmpty()) return false; - auto StartI = std::next(Stack.rbegin()); - auto EndI = std::prev(Stack.rend()); - if (FromParent && StartI != EndI) { + auto StartI = std::next(Stack.back().first.rbegin()); + auto EndI = Stack.back().first.rend(); + if (FromParent && StartI != EndI) StartI = std::next(StartI); - } for (auto I = StartI, EE = EndI; I != EE; ++I) { if (DPred(I->Directive, I->DirectiveName, I->ConstructLoc)) return true; @@ -839,6 +916,14 @@ void Sema::InitDataSharingAttributesStack() { #define DSAStack static_cast(VarDataSharingAttributesStack) +void Sema::pushOpenMPFunctionRegion() { + DSAStack->pushFunction(); +} + +void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) { + DSAStack->popFunction(OldFSI); +} + bool Sema::IsOpenMPCapturedByRef(ValueDecl *D, unsigned Level) { assert(LangOpts.OpenMP && "OpenMP is not allowed"); diff --git a/lib/Sema/TreeTransform.h b/lib/Sema/TreeTransform.h index 693b5c0..f4a97f5 100644 --- a/lib/Sema/TreeTransform.h +++ b/lib/Sema/TreeTransform.h @@ -2220,6 +2220,9 @@ public: Base = BaseResult.get(); QualType BaseType = Base->getType(); + if (isArrow && !BaseType->isPointerType()) + return ExprError(); + // FIXME: this involves duplicating earlier analysis in a lot of // cases; we should avoid this when possible. 
LookupResult R(getSema(), MemberNameInfo, Sema::LookupMemberName); diff --git a/lib/Serialization/ASTReader.cpp b/lib/Serialization/ASTReader.cpp index 406c4b5..7b1edc0 100644 --- a/lib/Serialization/ASTReader.cpp +++ b/lib/Serialization/ASTReader.cpp @@ -4779,6 +4779,7 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) { ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap(); bool First = true; Module *CurrentModule = nullptr; + Module::ModuleKind ModuleKind = Module::ModuleMapModule; RecordData Record; while (true) { llvm::BitstreamEntry Entry = F.Stream.advanceSkippingSubblocks(); @@ -4871,6 +4872,7 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) { CurrentModule->setASTFile(F.File); } + CurrentModule->Kind = ModuleKind; CurrentModule->Signature = F.Signature; CurrentModule->IsFromModuleFile = true; CurrentModule->IsSystem = IsSystem || CurrentModule->IsSystem; @@ -4969,6 +4971,7 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) { SubmodulesLoaded.resize(SubmodulesLoaded.size() + F.LocalNumSubmodules); } + ModuleKind = (Module::ModuleKind)Record[2]; break; } diff --git a/lib/Serialization/ASTWriter.cpp b/lib/Serialization/ASTWriter.cpp index bb86907..84d2420 100644 --- a/lib/Serialization/ASTWriter.cpp +++ b/lib/Serialization/ASTWriter.cpp @@ -2694,9 +2694,10 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) { unsigned ConflictAbbrev = Stream.EmitAbbrev(std::move(Abbrev)); // Write the submodule metadata block. - RecordData::value_type Record[] = {getNumberOfModules(WritingModule), - FirstSubmoduleID - - NUM_PREDEF_SUBMODULE_IDS}; + RecordData::value_type Record[] = { + getNumberOfModules(WritingModule), + FirstSubmoduleID - NUM_PREDEF_SUBMODULE_IDS, + (unsigned)WritingModule->Kind}; Stream.EmitRecord(SUBMODULE_METADATA, Record); // Write all of the submodules. 
diff --git a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp index 6e9b7fe..5730517 100644 --- a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp @@ -177,7 +177,10 @@ public: II_wcsdup(nullptr), II_win_wcsdup(nullptr), II_g_malloc(nullptr), II_g_malloc0(nullptr), II_g_realloc(nullptr), II_g_try_malloc(nullptr), II_g_try_malloc0(nullptr), II_g_try_realloc(nullptr), - II_g_free(nullptr), II_g_memdup(nullptr) {} + II_g_free(nullptr), II_g_memdup(nullptr), II_g_malloc_n(nullptr), + II_g_malloc0_n(nullptr), II_g_realloc_n(nullptr), + II_g_try_malloc_n(nullptr), II_g_try_malloc0_n(nullptr), + II_g_try_realloc_n(nullptr) {} /// In pessimistic mode, the checker assumes that it does not know which /// functions might free the memory. @@ -241,7 +244,10 @@ private: *II_if_nameindex, *II_if_freenameindex, *II_wcsdup, *II_win_wcsdup, *II_g_malloc, *II_g_malloc0, *II_g_realloc, *II_g_try_malloc, *II_g_try_malloc0, - *II_g_try_realloc, *II_g_free, *II_g_memdup; + *II_g_try_realloc, *II_g_free, *II_g_memdup, + *II_g_malloc_n, *II_g_malloc0_n, *II_g_realloc_n, + *II_g_try_malloc_n, *II_g_try_malloc0_n, + *II_g_try_realloc_n; mutable Optional KernelZeroFlagVal; void initIdentifierInfo(ASTContext &C) const; @@ -321,9 +327,12 @@ private: bool &ReleasedAllocated, bool ReturnsNullOnFailure = false) const; - ProgramStateRef ReallocMem(CheckerContext &C, const CallExpr *CE, - bool FreesMemOnFailure, - ProgramStateRef State) const; + ProgramStateRef ReallocMemAux(CheckerContext &C, const CallExpr *CE, + bool FreesMemOnFailure, + ProgramStateRef State, + bool SuffixWithN = false) const; + static SVal evalMulForBufferSize(CheckerContext &C, const Expr *Blocks, + const Expr *BlockBytes); static ProgramStateRef CallocMem(CheckerContext &C, const CallExpr *CE, ProgramStateRef State); @@ -569,6 +578,12 @@ void MallocChecker::initIdentifierInfo(ASTContext &Ctx) const { II_g_try_realloc = 
&Ctx.Idents.get("g_try_realloc"); II_g_free = &Ctx.Idents.get("g_free"); II_g_memdup = &Ctx.Idents.get("g_memdup"); + II_g_malloc_n = &Ctx.Idents.get("g_malloc_n"); + II_g_malloc0_n = &Ctx.Idents.get("g_malloc0_n"); + II_g_realloc_n = &Ctx.Idents.get("g_realloc_n"); + II_g_try_malloc_n = &Ctx.Idents.get("g_try_malloc_n"); + II_g_try_malloc0_n = &Ctx.Idents.get("g_try_malloc0_n"); + II_g_try_realloc_n = &Ctx.Idents.get("g_try_realloc_n"); } bool MallocChecker::isMemFunction(const FunctionDecl *FD, ASTContext &C) const { @@ -617,7 +632,10 @@ bool MallocChecker::isCMemFunction(const FunctionDecl *FD, FunI == II_g_malloc || FunI == II_g_malloc0 || FunI == II_g_realloc || FunI == II_g_try_malloc || FunI == II_g_try_malloc0 || FunI == II_g_try_realloc || - FunI == II_g_memdup) + FunI == II_g_memdup || FunI == II_g_malloc_n || + FunI == II_g_malloc0_n || FunI == II_g_realloc_n || + FunI == II_g_try_malloc_n || FunI == II_g_try_malloc0_n || + FunI == II_g_try_realloc_n) return true; } @@ -767,6 +785,17 @@ llvm::Optional MallocChecker::performKernelMalloc( return None; } +SVal MallocChecker::evalMulForBufferSize(CheckerContext &C, const Expr *Blocks, + const Expr *BlockBytes) { + SValBuilder &SB = C.getSValBuilder(); + SVal BlocksVal = C.getSVal(Blocks); + SVal BlockBytesVal = C.getSVal(BlockBytes); + ProgramStateRef State = C.getState(); + SVal TotalSize = SB.evalBinOp(State, BO_Mul, BlocksVal, BlockBytesVal, + SB.getContext().getSizeType()); + return TotalSize; +} + void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const { if (C.wasInlined) return; @@ -813,10 +842,10 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const { State = ProcessZeroAllocation(C, CE, 0, State); } else if (FunI == II_realloc || FunI == II_g_realloc || FunI == II_g_try_realloc) { - State = ReallocMem(C, CE, false, State); + State = ReallocMemAux(C, CE, false, State); State = ProcessZeroAllocation(C, CE, 1, State); } else if (FunI == II_reallocf) { - 
State = ReallocMem(C, CE, true, State); + State = ReallocMemAux(C, CE, true, State); State = ProcessZeroAllocation(C, CE, 1, State); } else if (FunI == II_calloc) { State = CallocMem(C, CE, State); @@ -874,6 +903,25 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const { return; State = MallocMemAux(C, CE, CE->getArg(1), UndefinedVal(), State); State = ProcessZeroAllocation(C, CE, 1, State); + } else if (FunI == II_g_malloc_n || FunI == II_g_try_malloc_n || + FunI == II_g_malloc0_n || FunI == II_g_try_malloc0_n) { + if (CE->getNumArgs() < 2) + return; + SVal Init = UndefinedVal(); + if (FunI == II_g_malloc0_n || FunI == II_g_try_malloc0_n) { + SValBuilder &SB = C.getSValBuilder(); + Init = SB.makeZeroVal(SB.getContext().CharTy); + } + SVal TotalSize = evalMulForBufferSize(C, CE->getArg(0), CE->getArg(1)); + State = MallocMemAux(C, CE, TotalSize, Init, State); + State = ProcessZeroAllocation(C, CE, 0, State); + State = ProcessZeroAllocation(C, CE, 1, State); + } else if (FunI == II_g_realloc_n || FunI == II_g_try_realloc_n) { + if (CE->getNumArgs() < 3) + return; + State = ReallocMemAux(C, CE, false, State, true); + State = ProcessZeroAllocation(C, CE, 1, State); + State = ProcessZeroAllocation(C, CE, 2, State); } } @@ -1976,14 +2024,17 @@ void MallocChecker::ReportUseZeroAllocated(CheckerContext &C, } } -ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C, - const CallExpr *CE, - bool FreesOnFail, - ProgramStateRef State) const { +ProgramStateRef MallocChecker::ReallocMemAux(CheckerContext &C, + const CallExpr *CE, + bool FreesOnFail, + ProgramStateRef State, + bool SuffixWithN) const { if (!State) return nullptr; - if (CE->getNumArgs() < 2) + if (SuffixWithN && CE->getNumArgs() < 3) + return nullptr; + else if (CE->getNumArgs() < 2) return nullptr; const Expr *arg0Expr = CE->getArg(0); @@ -1998,20 +2049,19 @@ ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C, DefinedOrUnknownSVal PtrEQ = svalBuilder.evalEQ(State, 
arg0Val, svalBuilder.makeNull()); - // Get the size argument. If there is no size arg then give up. + // Get the size argument. const Expr *Arg1 = CE->getArg(1); - if (!Arg1) - return nullptr; // Get the value of the size argument. - SVal Arg1ValG = State->getSVal(Arg1, LCtx); - if (!Arg1ValG.getAs()) + SVal TotalSize = State->getSVal(Arg1, LCtx); + if (SuffixWithN) + TotalSize = evalMulForBufferSize(C, Arg1, CE->getArg(2)); + if (!TotalSize.getAs()) return nullptr; - DefinedOrUnknownSVal Arg1Val = Arg1ValG.castAs(); // Compare the size argument to 0. DefinedOrUnknownSVal SizeZero = - svalBuilder.evalEQ(State, Arg1Val, + svalBuilder.evalEQ(State, TotalSize.castAs(), svalBuilder.makeIntValWithPtrWidth(0, false)); ProgramStateRef StatePtrIsNull, StatePtrNotNull; @@ -2025,8 +2075,8 @@ ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C, // If the ptr is NULL and the size is not 0, the call is equivalent to // malloc(size). - if ( PrtIsNull && !SizeIsZero) { - ProgramStateRef stateMalloc = MallocMemAux(C, CE, CE->getArg(1), + if (PrtIsNull && !SizeIsZero) { + ProgramStateRef stateMalloc = MallocMemAux(C, CE, TotalSize, UndefinedVal(), StatePtrIsNull); return stateMalloc; } @@ -2059,7 +2109,7 @@ ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C, if (ProgramStateRef stateFree = FreeMemAux(C, CE, State, 0, false, ReleasedAllocated)) { - ProgramStateRef stateRealloc = MallocMemAux(C, CE, CE->getArg(1), + ProgramStateRef stateRealloc = MallocMemAux(C, CE, TotalSize, UnknownVal(), stateFree); if (!stateRealloc) return nullptr; @@ -2090,12 +2140,8 @@ ProgramStateRef MallocChecker::CallocMem(CheckerContext &C, const CallExpr *CE, return nullptr; SValBuilder &svalBuilder = C.getSValBuilder(); - const LocationContext *LCtx = C.getLocationContext(); - SVal count = State->getSVal(CE->getArg(0), LCtx); - SVal elementSize = State->getSVal(CE->getArg(1), LCtx); - SVal TotalSize = svalBuilder.evalBinOp(State, BO_Mul, count, elementSize, - 
svalBuilder.getContext().getSizeType()); SVal zeroVal = svalBuilder.makeZeroVal(svalBuilder.getContext().CharTy); + SVal TotalSize = evalMulForBufferSize(C, CE->getArg(0), CE->getArg(1)); return MallocMemAux(C, CE, TotalSize, zeroVal, State); } diff --git a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp index 7309741..d00182a 100644 --- a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp +++ b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp @@ -61,7 +61,9 @@ const Expr *bugreporter::getDerefExpr(const Stmt *S) { return U->getSubExpr()->IgnoreParenCasts(); } else if (const MemberExpr *ME = dyn_cast(E)) { - if (ME->isArrow() || isDeclRefExprToReference(ME->getBase())) { + if (ME->isImplicitAccess()) { + return ME; + } else if (ME->isArrow() || isDeclRefExprToReference(ME->getBase())) { return ME->getBase()->IgnoreParenCasts(); } else { // If we have a member expr with a dot, the base must have been @@ -73,9 +75,9 @@ const Expr *bugreporter::getDerefExpr(const Stmt *S) { return IvarRef->getBase()->IgnoreParenCasts(); } else if (const ArraySubscriptExpr *AE = dyn_cast(E)) { - return AE->getBase(); + return getDerefExpr(AE->getBase()); } - else if (isDeclRefExprToReference(E)) { + else if (isa(E)) { return E; } break; @@ -961,7 +963,24 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *N, const Expr *Inner = nullptr; if (const Expr *Ex = dyn_cast(S)) { Ex = Ex->IgnoreParenCasts(); - if (ExplodedGraph::isInterestingLValueExpr(Ex) || CallEvent::isCallStmt(Ex)) + + // Performing operator `&' on an lvalue expression is essentially a no-op. + // Then, if we are taking addresses of fields or elements, these are also + // unlikely to matter. + // FIXME: There's a hack in our Store implementation that always computes + // field offsets around null pointers as if they are always equal to 0. 
+ // The idea here is to report accesses to fields as null dereferences + // even though the pointer value that's being dereferenced is actually + // the offset of the field rather than exactly 0. + // See the FIXME in StoreManager's getLValueFieldOrIvar() method. + // This code interacts heavily with this hack; otherwise the value + // would not be null at all for most fields, so we'd be unable to track it. + if (const auto *Op = dyn_cast(Ex)) + if (Op->getOpcode() == UO_AddrOf && Op->getSubExpr()->isLValue()) + if (const Expr *DerefEx = getDerefExpr(Op->getSubExpr())) + Ex = DerefEx; + + if (Ex && (ExplodedGraph::isInterestingLValueExpr(Ex) || CallEvent::isCallStmt(Ex))) Inner = Ex; } diff --git a/lib/StaticAnalyzer/Core/ExprEngine.cpp b/lib/StaticAnalyzer/Core/ExprEngine.cpp index 9e6ec09..8ee3419 100644 --- a/lib/StaticAnalyzer/Core/ExprEngine.cpp +++ b/lib/StaticAnalyzer/Core/ExprEngine.cpp @@ -1904,8 +1904,8 @@ void ExprEngine::processSwitch(SwitchNodeBuilder& builder) { // Evaluate the LHS of the case value. llvm::APSInt V1 = Case->getLHS()->EvaluateKnownConstInt(getContext()); - assert(V1.getBitWidth() == getContext().getTypeSize(CondE->getType())); - + assert(V1.getBitWidth() == getContext().getIntWidth(CondE->getType())); + // Get the RHS of the case, if it exists. llvm::APSInt V2; if (const Expr *E = Case->getRHS()) diff --git a/lib/StaticAnalyzer/Core/RegionStore.cpp b/lib/StaticAnalyzer/Core/RegionStore.cpp index dd7e9dd..3000e13 100644 --- a/lib/StaticAnalyzer/Core/RegionStore.cpp +++ b/lib/StaticAnalyzer/Core/RegionStore.cpp @@ -1338,6 +1338,9 @@ RegionStoreManager::getSizeInElements(ProgramStateRef state, /// the array). This is called by ExprEngine when evaluating casts /// from arrays to pointers. 
SVal RegionStoreManager::ArrayToPointer(Loc Array, QualType T) { + if (Array.getAs()) + return Array; + if (!Array.getAs()) return UnknownVal(); diff --git a/lib/StaticAnalyzer/Core/Store.cpp b/lib/StaticAnalyzer/Core/Store.cpp index ba48a60..1af49f6 100644 --- a/lib/StaticAnalyzer/Core/Store.cpp +++ b/lib/StaticAnalyzer/Core/Store.cpp @@ -404,9 +404,15 @@ SVal StoreManager::getLValueFieldOrIvar(const Decl *D, SVal Base) { case loc::ConcreteIntKind: // While these seem funny, this can happen through casts. - // FIXME: What we should return is the field offset. For example, - // add the field offset to the integer value. That way funny things + // FIXME: What we should return is the field offset, not base. For example, + // add the field offset to the integer value. That way things // like this work properly: &(((struct foo *) 0xa)->f) + // However, that's not easy to fix without reducing our abilities + // to catch null pointer dereference. Eg., ((struct foo *)0x0)->f = 7 + // is a null dereference even though we're dereferencing offset of f + // rather than null. Coming up with an approach that computes offsets + // over null pointers properly while still being able to catch null + // dereferences might be worth it. return Base; default: @@ -431,7 +437,7 @@ SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset, // If the base is an unknown or undefined value, just return it back. // FIXME: For absolute pointer addresses, we just return that value back as // well, although in reality we should return the offset added to that - // value. + // value. See also the similar FIXME in getLValueFieldOrIvar(). 
if (Base.isUnknownOrUndef() || Base.getAs()) return Base; diff --git a/test/Analysis/enum.cpp b/test/Analysis/enum.cpp index e26b8f0..b561e65 100644 --- a/test/Analysis/enum.cpp +++ b/test/Analysis/enum.cpp @@ -24,3 +24,16 @@ void testCasting(int i) { clang_analyzer_eval(j == 0); // expected-warning{{FALSE}} } } + +enum class EnumBool : bool { + F = false, + T = true +}; + +bool testNoCrashOnSwitchEnumBool(EnumBool E) { + switch (E) { + case EnumBool::F: + return false; + } + return true; +} diff --git a/test/Analysis/gmalloc.c b/test/Analysis/gmalloc.c index 10c4fe2..50413e2 100644 --- a/test/Analysis/gmalloc.c +++ b/test/Analysis/gmalloc.c @@ -13,6 +13,12 @@ gpointer g_realloc(gpointer mem, gsize n_bytes); gpointer g_try_malloc(gsize n_bytes); gpointer g_try_malloc0(gsize n_bytes); gpointer g_try_realloc(gpointer mem, gsize n_bytes); +gpointer g_malloc_n(gsize n_blocks, gsize n_block_bytes); +gpointer g_malloc0_n(gsize n_blocks, gsize n_block_bytes); +gpointer g_realloc_n(gpointer mem, gsize n_blocks, gsize n_block_bytes); +gpointer g_try_malloc_n(gsize n_blocks, gsize n_block_bytes); +gpointer g_try_malloc0_n(gsize n_blocks, gsize n_block_bytes); +gpointer g_try_realloc_n(gpointer mem, gsize n_blocks, gsize n_block_bytes); void g_free(gpointer mem); gpointer g_memdup(gconstpointer mem, guint byte_size); @@ -25,6 +31,12 @@ void f1() { gpointer g3 = g_try_malloc(n_bytes); gpointer g4 = g_try_malloc0(n_bytes); g3 = g_try_realloc(g3, n_bytes * 2); + gpointer g5 = g_malloc_n(n_bytes, sizeof(char)); + gpointer g6 = g_malloc0_n(n_bytes, sizeof(char)); + g5 = g_realloc_n(g5, n_bytes * 2, sizeof(char)); + gpointer g7 = g_try_malloc_n(n_bytes, sizeof(char)); + gpointer g8 = g_try_malloc0_n(n_bytes, sizeof(char)); + g7 = g_try_realloc_n(g7, n_bytes * 2, sizeof(char)); g_free(g1); g_free(g2); @@ -38,6 +50,12 @@ void f2() { gpointer g3 = g_try_malloc(n_bytes); gpointer g4 = g_try_malloc0(n_bytes); g3 = g_try_realloc(g3, n_bytes * 2); + gpointer g5 = g_malloc_n(n_bytes, 
sizeof(char)); + gpointer g6 = g_malloc0_n(n_bytes, sizeof(char)); + g5 = g_realloc_n(g5, n_bytes * 2, sizeof(char)); + gpointer g7 = g_try_malloc_n(n_bytes, sizeof(char)); + gpointer g8 = g_try_malloc0_n(n_bytes, sizeof(char)); + g7 = g_try_realloc_n(g7, n_bytes * 2, sizeof(char)); g_free(g1); g_free(g2); @@ -52,8 +70,100 @@ void f3() { gpointer g3 = g_try_malloc(n_bytes); gpointer g4 = g_try_malloc0(n_bytes); g3 = g_try_realloc(g3, n_bytes * 2); // expected-warning{{Potential leak of memory pointed to by 'g4'}} + gpointer g5 = g_malloc_n(n_bytes, sizeof(char)); + gpointer g6 = g_malloc0_n(n_bytes, sizeof(char)); + g5 = g_realloc_n(g5, n_bytes * 2, sizeof(char)); // expected-warning{{Potential leak of memory pointed to by 'g6'}} + gpointer g7 = g_try_malloc_n(n_bytes, sizeof(char)); // expected-warning{{Potential leak of memory pointed to by 'g5'}} + gpointer g8 = g_try_malloc0_n(n_bytes, sizeof(char)); + g7 = g_try_realloc_n(g7, n_bytes * 2, sizeof(char)); // expected-warning{{Potential leak of memory pointed to by 'g8'}} + + g_free(g1); // expected-warning{{Potential leak of memory pointed to by 'g7'}} + g_free(g2); + g_free(g3); +} + +void f4() { + gpointer g1 = g_malloc(n_bytes); + gpointer g2 = g_malloc0(n_bytes); + g1 = g_realloc(g1, n_bytes * 2); + gpointer g3 = g_try_malloc(n_bytes); + gpointer g4 = g_try_malloc0(n_bytes); + g3 = g_try_realloc(g3, n_bytes * 2); + gpointer g5 = g_malloc_n(n_bytes, sizeof(char)); + gpointer g6 = g_malloc0_n(n_bytes, sizeof(char)); + g5 = g_realloc_n(g5, n_bytes * 2, sizeof(char)); // expected-warning{{Potential leak of memory pointed to by 'g6'}} + gpointer g7 = g_try_malloc_n(n_bytes, sizeof(char)); // expected-warning{{Potential leak of memory pointed to by 'g5'}} + gpointer g8 = g_try_malloc0_n(n_bytes, sizeof(char)); + g7 = g_try_realloc_n(g7, n_bytes * 2, sizeof(char)); // expected-warning{{Potential leak of memory pointed to by 'g8'}} + + g_free(g1); // expected-warning{{Potential leak of memory pointed to by 'g7'}} + 
g_free(g2); + g_free(g3); + g_free(g4); +} + +void f5() { + gpointer g1 = g_malloc(n_bytes); + gpointer g2 = g_malloc0(n_bytes); + g1 = g_realloc(g1, n_bytes * 2); + gpointer g3 = g_try_malloc(n_bytes); + gpointer g4 = g_try_malloc0(n_bytes); + g3 = g_try_realloc(g3, n_bytes * 2); + gpointer g5 = g_malloc_n(n_bytes, sizeof(char)); + gpointer g6 = g_malloc0_n(n_bytes, sizeof(char)); + g5 = g_realloc_n(g5, n_bytes * 2, sizeof(char)); // expected-warning{{Potential leak of memory pointed to by 'g6'}} + gpointer g7 = g_try_malloc_n(n_bytes, sizeof(char)); + gpointer g8 = g_try_malloc0_n(n_bytes, sizeof(char)); + g7 = g_try_realloc_n(g7, n_bytes * 2, sizeof(char)); // expected-warning{{Potential leak of memory pointed to by 'g8'}} + + g_free(g1); // expected-warning{{Potential leak of memory pointed to by 'g7'}} + g_free(g2); + g_free(g3); + g_free(g4); + g_free(g5); +} + +void f6() { + gpointer g1 = g_malloc(n_bytes); + gpointer g2 = g_malloc0(n_bytes); + g1 = g_realloc(g1, n_bytes * 2); + gpointer g3 = g_try_malloc(n_bytes); + gpointer g4 = g_try_malloc0(n_bytes); + g3 = g_try_realloc(g3, n_bytes * 2); + gpointer g5 = g_malloc_n(n_bytes, sizeof(char)); + gpointer g6 = g_malloc0_n(n_bytes, sizeof(char)); + g5 = g_realloc_n(g5, n_bytes * 2, sizeof(char)); + gpointer g7 = g_try_malloc_n(n_bytes, sizeof(char)); + gpointer g8 = g_try_malloc0_n(n_bytes, sizeof(char)); + g7 = g_try_realloc_n(g7, n_bytes * 2, sizeof(char)); // expected-warning{{Potential leak of memory pointed to by 'g8'}} + + g_free(g1); // expected-warning{{Potential leak of memory pointed to by 'g7'}} + g_free(g2); + g_free(g3); + g_free(g4); + g_free(g5); + g_free(g6); +} + +void f7() { + gpointer g1 = g_malloc(n_bytes); + gpointer g2 = g_malloc0(n_bytes); + g1 = g_realloc(g1, n_bytes * 2); + gpointer g3 = g_try_malloc(n_bytes); + gpointer g4 = g_try_malloc0(n_bytes); + g3 = g_try_realloc(g3, n_bytes * 2); + gpointer g5 = g_malloc_n(n_bytes, sizeof(char)); + gpointer g6 = g_malloc0_n(n_bytes, 
sizeof(char)); + g5 = g_realloc_n(g5, n_bytes * 2, sizeof(char)); + gpointer g7 = g_try_malloc_n(n_bytes, sizeof(char)); + gpointer g8 = g_try_malloc0_n(n_bytes, sizeof(char)); + g7 = g_try_realloc_n(g7, n_bytes * 2, sizeof(char)); // expected-warning{{Potential leak of memory pointed to by 'g8'}} g_free(g1); g_free(g2); g_free(g3); + g_free(g4); + g_free(g5); + g_free(g6); + g_free(g7); } diff --git a/test/Analysis/inlining/inline-defensive-checks.c b/test/Analysis/inlining/inline-defensive-checks.c index 4029da6..010d3a7 100644 --- a/test/Analysis/inlining/inline-defensive-checks.c +++ b/test/Analysis/inlining/inline-defensive-checks.c @@ -1,7 +1,7 @@ // RUN: %clang_analyze_cc1 -analyzer-checker=core -analyzer-config suppress-inlined-defensive-checks=true -verify %s // Perform inline defensive checks. -void idc(int *p) { +void idc(void *p) { if (p) ; } @@ -139,3 +139,42 @@ void idcTrackZeroThroughDoubleAssignemnt(int x) { int z = y; idcTriggerZeroValueThroughCall(z); } + +struct S { + int f1; + int f2; +}; + +void idcTrackZeroValueThroughUnaryPointerOperators(struct S *s) { + idc(s); + *(&(s->f1)) = 7; // no-warning +} + +void idcTrackZeroValueThroughUnaryPointerOperatorsWithOffset1(struct S *s) { + idc(s); + int *x = &(s->f2); + *x = 7; // no-warning +} + +void idcTrackZeroValueThroughUnaryPointerOperatorsWithOffset2(struct S *s) { + idc(s); + int *x = &(s->f2) - 1; + // FIXME: Should not warn. 
+ *x = 7; // expected-warning{{Dereference of null pointer}} +} + +void idcTrackZeroValueThroughUnaryPointerOperatorsWithAssignment(struct S *s) { + idc(s); + int *x = &(s->f1); + *x = 7; // no-warning +} + + +struct S2 { + int a[1]; +}; + +void idcTrackZeroValueThroughUnaryPointerOperatorsWithArrayField(struct S2 *s) { + idc(s); + *(&(s->a[0])) = 7; // no-warning +} diff --git a/test/Analysis/inlining/inline-defensive-checks.cpp b/test/Analysis/inlining/inline-defensive-checks.cpp index 6a803fa..eaae8d2 100644 --- a/test/Analysis/inlining/inline-defensive-checks.cpp +++ b/test/Analysis/inlining/inline-defensive-checks.cpp @@ -70,4 +70,17 @@ int *retNull() { void test(int *p1, int *p2) { idc(p1); Foo f(p1); -} \ No newline at end of file +} + +struct Bar { + int x; +}; +void idcBar(Bar *b) { + if (b) + ; +} +void testRefToField(Bar *b) { + idcBar(b); + int &x = b->x; // no-warning + x = 5; +} diff --git a/test/Analysis/null-deref-offsets.c b/test/Analysis/null-deref-offsets.c new file mode 100644 index 0000000..988cec4 --- /dev/null +++ b/test/Analysis/null-deref-offsets.c @@ -0,0 +1,37 @@ +// RUN: %clang_analyze_cc1 -w -triple i386-apple-darwin10 -analyzer-checker=core,debug.ExprInspection -verify %s + +void clang_analyzer_eval(int); + +struct S { + int x, y; + int z[2]; +}; + +void testOffsets(struct S *s, int coin) { + if (s != 0) + return; + + // FIXME: Here we are testing the hack that computes offsets to null pointers + // as 0 in order to find null dereferences of not-exactly-null pointers, + // such as &(s->y) below, which is equal to 4 rather than 0 in run-time. + + // These are indeed null. + clang_analyzer_eval(s == 0); // expected-warning{{TRUE}} + clang_analyzer_eval(&(s->x) == 0); // expected-warning{{TRUE}} + + // FIXME: These should ideally be true. 
+ clang_analyzer_eval(&(s->y) == 4); // expected-warning{{FALSE}} + clang_analyzer_eval(&(s->z[0]) == 8); // expected-warning{{FALSE}} + clang_analyzer_eval(&(s->z[1]) == 12); // expected-warning{{FALSE}} + + // FIXME: These should ideally be false. + clang_analyzer_eval(&(s->y) == 0); // expected-warning{{TRUE}} + clang_analyzer_eval(&(s->z[0]) == 0); // expected-warning{{TRUE}} + clang_analyzer_eval(&(s->z[1]) == 0); // expected-warning{{TRUE}} + + // But these should still be reported as null dereferences. + if (coin) + s->y = 5; // expected-warning{{Access to field 'y' results in a dereference of a null pointer (loaded from variable 's')}} + else + s->z[1] = 6; // expected-warning{{Array access (via field 'z') results in a null pointer dereference}} +} diff --git a/test/Analysis/uninit-const.cpp b/test/Analysis/uninit-const.cpp index 75e932a..db969bf 100644 --- a/test/Analysis/uninit-const.cpp +++ b/test/Analysis/uninit-const.cpp @@ -122,7 +122,7 @@ void f1(void) { } void f_uninit(void) { - int x; + int x; // expected-note {{'x' declared without an initial value}} doStuff_uninit(&x); // expected-warning {{1st function call argument is a pointer to uninitialized value}} // expected-note@-1 {{1st function call argument is a pointer to uninitialized value}} } diff --git a/test/CXX/modules-ts/basic/basic.link/module-declaration.cpp b/test/CXX/modules-ts/basic/basic.link/module-declaration.cpp new file mode 100644 index 0000000..ee696d1 --- /dev/null +++ b/test/CXX/modules-ts/basic/basic.link/module-declaration.cpp @@ -0,0 +1,55 @@ +// Tests for module-declaration syntax. 
+// +// RUN: rm -rf %t +// RUN: mkdir -p %t +// RUN: echo 'export module x; int a, b;' > %t/x.cppm +// RUN: echo 'export module x.y; int c;' > %t/x.y.cppm +// +// RUN: %clang_cc1 -std=c++1z -fmodules-ts -emit-module-interface %t/x.cppm -o %t/x.pcm +// RUN: %clang_cc1 -std=c++1z -fmodules-ts -emit-module-interface -fmodule-file=%t/x.pcm %t/x.y.cppm -o %t/x.y.pcm +// +// Module implementation for unknown and known module. (The former is ill-formed.) +// FIXME: TEST=1 should fail because we don't have an interface for module z. +// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \ +// RUN: -DTEST=1 -DEXPORT= -DPARTITION= -DMODULE_NAME=z +// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \ +// RUN: -DTEST=2 -DEXPORT= -DPARTITION= -DMODULE_NAME=x +// +// Module interface for unknown and known module. (The latter is ill-formed due to +// redefinition.) +// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \ +// RUN: -DTEST=3 -DEXPORT=export -DPARTITION= -DMODULE_NAME=z +// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \ +// RUN: -DTEST=4 -DEXPORT=export -DPARTITION= -DMODULE_NAME=x +// +// Defining a module partition. +// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \ +// RUN: -DTEST=5 -DEXPORT=export -DPARTITION=partition -DMODULE_NAME=z +// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \ +// RUN: -DTEST=6 -DEXPORT= -DPARTITION=partition -DMODULE_NAME=z +// +// Miscellaneous syntax. 
+// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \ +// RUN: -DTEST=7 -DEXPORT= -DPARTITION=elderberry -DMODULE_NAME=z +// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \ +// RUN: -DTEST=8 -DEXPORT= -DPARTITION= -DMODULE_NAME='z [[]]' +// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \ +// RUN: -DTEST=9 -DEXPORT= -DPARTITION= -DMODULE_NAME='z [[fancy]]' +// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \ +// RUN: -DTEST=10 -DEXPORT= -DPARTITION= -DMODULE_NAME='z [[maybe_unused]]' + +EXPORT module PARTITION MODULE_NAME; +#if TEST == 4 +// expected-error@-2 {{redefinition of module 'x'}} +// expected-note-re@module-declaration.cpp:* {{loaded from '{{.*}}/x.pcm'}} +#elif TEST == 6 +// expected-error@-5 {{module partition must be declared 'export'}} +#elif TEST == 7 +// expected-error@-7 {{expected ';'}} expected-error@-7 {{requires a type specifier}} +#elif TEST == 9 +// expected-warning@-9 {{unknown attribute 'fancy' ignored}} +#elif TEST == 10 +// expected-error-re@-11 {{'maybe_unused' attribute cannot be applied to a module{{$}}}} +#else +// expected-no-diagnostics +#endif diff --git a/test/CXX/modules-ts/codegen-basics.cppm b/test/CXX/modules-ts/codegen-basics.cppm new file mode 100644 index 0000000..d1552d9 --- /dev/null +++ b/test/CXX/modules-ts/codegen-basics.cppm @@ -0,0 +1,23 @@ +// RUN: %clang_cc1 -fmodules-ts -std=c++1z -triple=x86_64-linux-gnu -fmodules-codegen -emit-module-interface %s -o %t.pcm +// RUN: %clang_cc1 -fmodules-ts -std=c++1z -triple=x86_64-linux-gnu %t.pcm -emit-llvm -o - | FileCheck %s + +export module FooBar; + +export { + // CHECK-LABEL: define i32 @_Z1fv( + int f() { return 0; } +} + +// CHECK-LABEL: define weak_odr void @_Z2f2v( +inline void f2() { } + +// FIXME: Emit global variables and their initializers with this TU. 
+// Emit an initialization function that other TUs can call, with guard variable. + +// FIXME: Mangle non-exported symbols so they don't collide with +// non-exported symbols from other modules? + +// FIXME: Formally-internal-linkage symbols that are used from an exported +// symbol need a mangled name and external linkage. + +// FIXME: const-qualified variables don't have implicit internal linkage when owned by a module. diff --git a/test/CXX/modules-ts/dcl.dcl/dcl.module/dcl.module.import/p1.cpp b/test/CXX/modules-ts/dcl.dcl/dcl.module/dcl.module.import/p1.cpp new file mode 100644 index 0000000..aaf43d6 --- /dev/null +++ b/test/CXX/modules-ts/dcl.dcl/dcl.module/dcl.module.import/p1.cpp @@ -0,0 +1,41 @@ +// RUN: rm -rf %t +// RUN: mkdir -p %t +// RUN: echo 'export module x; int a, b;' > %t/x.cppm +// RUN: echo 'export module x.y; int c;' > %t/x.y.cppm +// +// RUN: %clang_cc1 -std=c++1z -fmodules-ts -emit-module-interface %t/x.cppm -o %t/x.pcm +// RUN: %clang_cc1 -std=c++1z -fmodules-ts -emit-module-interface -fmodule-file=%t/x.pcm %t/x.y.cppm -o %t/x.y.pcm +// +// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \ +// RUN: -DMODULE_NAME=z +// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \ +// RUN: -DMODULE_X -DMODULE_NAME=x + +module MODULE_NAME; + +int use_1 = a; +#if !MODULE_X +// expected-error@-2 {{declaration of 'a' must be imported from module 'x' before it is required}} +// expected-note@x.cppm:1 {{here}} +#endif + +import x; + +int use_2 = b; // ok + +// There is no relation between module x and module x.y. 
+int use_3 = c; // expected-error {{declaration of 'c' must be imported from module 'x.y'}} + // expected-note@x.y.cppm:1 {{here}} + +import x [[]]; +import x [[foo]]; // expected-warning {{unknown attribute 'foo' ignored}} +import x [[noreturn]]; // expected-error {{'noreturn' attribute cannot be applied to a module import}} +import x [[blarg::noreturn]]; // expected-warning {{unknown attribute 'noreturn' ignored}} + +import x.y; +import x.; // expected-error {{expected a module name after 'import'}} +import .x; // expected-error {{expected a module name after 'import'}} + +int use_4 = c; // ok + +import blarg; // expected-error {{module 'blarg' not found}} diff --git a/test/CXX/modules-ts/dcl.dcl/dcl.module/dcl.module.interface/p1.cpp b/test/CXX/modules-ts/dcl.dcl/dcl.module/dcl.module.interface/p1.cpp new file mode 100644 index 0000000..afe3a7a --- /dev/null +++ b/test/CXX/modules-ts/dcl.dcl/dcl.module/dcl.module.interface/p1.cpp @@ -0,0 +1,27 @@ +// RUN: %clang_cc1 -fmodules-ts %s -verify -o /dev/null +// RUN: %clang_cc1 -fmodules-ts %s -DINTERFACE -verify -o /dev/null +// RUN: %clang_cc1 -fmodules-ts %s -DIMPLEMENTATION -verify -o /dev/null +// +// RUN: %clang_cc1 -fmodules-ts %s -DBUILT_AS_INTERFACE -emit-module-interface -verify -o /dev/null +// RUN: %clang_cc1 -fmodules-ts %s -DINTERFACE -DBUILT_AS_INTERFACE -emit-module-interface -verify -o /dev/null +// RUN: %clang_cc1 -fmodules-ts %s -DIMPLEMENTATION -DBUILT_AS_INTERFACE -emit-module-interface -verify -o /dev/null + +#if INTERFACE +export module A; +#elif IMPLEMENTATION +module A; + #ifdef BUILT_AS_INTERFACE + // expected-error@-2 {{missing 'export' specifier in module declaration while building module interface}} + #endif +#else + #ifdef BUILT_AS_INTERFACE + // FIXME: Diagnose missing module declaration (at end of TU) + #endif +#endif + +export int a; +#ifndef INTERFACE +// expected-error@-2 {{export declaration can only be used within a module interface unit}} +#else +// expected-no-diagnostics +#endif 
diff --git a/test/CodeGen/asan-globals-gc.cpp b/test/CodeGen/asan-globals-gc.cpp new file mode 100644 index 0000000..6d64f41 --- /dev/null +++ b/test/CodeGen/asan-globals-gc.cpp @@ -0,0 +1,7 @@ +// RUN: %clang_cc1 -fsanitize=address -emit-llvm -o - -triple x86_64-windows-msvc %s | FileCheck %s --check-prefix=WITH-GC +// RUN: %clang_cc1 -fsanitize=address -emit-llvm -o - -triple x86_64-windows-msvc -fdata-sections %s | FileCheck %s --check-prefix=WITH-GC + +int global; + +// WITH-GC-NOT: call void @__asan_register_globals +// WITHOUT-GC: call void @__asan_register_globals diff --git a/test/CodeGen/catch-undef-behavior.c b/test/CodeGen/catch-undef-behavior.c index c5f3a79..e67f0a1 100644 --- a/test/CodeGen/catch-undef-behavior.c +++ b/test/CodeGen/catch-undef-behavior.c @@ -36,13 +36,7 @@ void foo() { // CHECK-COMMON: %[[I8PTR:.*]] = bitcast i32* %[[PTR:.*]] to i8* // CHECK-COMMON-NEXT: %[[SIZE:.*]] = call i64 @llvm.objectsize.i64.p0i8(i8* %[[I8PTR]], i1 false, i1 false) - // CHECK-COMMON-NEXT: %[[CHECK0:.*]] = icmp uge i64 %[[SIZE]], 4 - - // CHECK-COMMON: %[[PTRTOINT:.*]] = ptrtoint {{.*}}* %[[PTR]] to i64 - // CHECK-COMMON-NEXT: %[[MISALIGN:.*]] = and i64 %[[PTRTOINT]], 3 - // CHECK-COMMON-NEXT: %[[CHECK1:.*]] = icmp eq i64 %[[MISALIGN]], 0 - - // CHECK-COMMON: %[[OK:.*]] = and i1 %[[CHECK0]], %[[CHECK1]] + // CHECK-COMMON-NEXT: %[[OK:.*]] = icmp uge i64 %[[SIZE]], 4 // CHECK-UBSAN: br i1 %[[OK]], {{.*}} !prof ![[WEIGHT_MD:.*]], !nosanitize // CHECK-TRAP: br i1 %[[OK]], {{.*}} diff --git a/test/CodeGen/pgo-sample-thinlto-summary.c b/test/CodeGen/pgo-sample-thinlto-summary.c index a284af3..7782aeb 100644 --- a/test/CodeGen/pgo-sample-thinlto-summary.c +++ b/test/CodeGen/pgo-sample-thinlto-summary.c @@ -32,10 +32,10 @@ void unroll() { baz(i); } -// Checks if icp is invoked by normal compile, but not thinlto compile. +// Check that icp is not invoked (both -O2 and ThinLTO). 
// O2-LABEL: define void @icp // THINLTO-LABEL: define void @icp -// O2: if.true.direct_targ +// O2-NOT: if.true.direct_targ // ThinLTO-NOT: if.true.direct_targ void icp(void (*p)()) { p(); diff --git a/test/CodeGen/sanitize-recover.c b/test/CodeGen/sanitize-recover.c index 99eff85..d714d58 100644 --- a/test/CodeGen/sanitize-recover.c +++ b/test/CodeGen/sanitize-recover.c @@ -22,15 +22,7 @@ void foo() { // PARTIAL: %[[SIZE:.*]] = call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 false, i1 false) // PARTIAL-NEXT: %[[CHECK0:.*]] = icmp uge i64 %[[SIZE]], 4 - // PARTIAL: %[[MISALIGN:.*]] = and i64 {{.*}}, 3 - // PARTIAL-NEXT: %[[CHECK1:.*]] = icmp eq i64 %[[MISALIGN]], 0 + // PARTIAL: br i1 %[[CHECK0]], {{.*}} !nosanitize - // PARTIAL: %[[CHECK01:.*]] = and i1 %[[CHECK1]], %[[CHECK0]] - - // PARTIAL: br i1 %[[CHECK01]], {{.*}} !nosanitize - // PARTIAL: br i1 %[[CHECK1]], {{.*}} !nosanitize - - // PARTIAL: call void @__ubsan_handle_type_mismatch_v1_abort( - // PARTIAL-NEXT: unreachable // PARTIAL: call void @__ubsan_handle_type_mismatch_v1( } diff --git a/test/CodeGen/split-debug-filename.c b/test/CodeGen/split-debug-filename.c index 43daead..9ca7b0f 100644 --- a/test/CodeGen/split-debug-filename.c +++ b/test/CodeGen/split-debug-filename.c @@ -1,7 +1,12 @@ // RUN: %clang_cc1 -debug-info-kind=limited -split-dwarf-file foo.dwo -S -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -debug-info-kind=limited -enable-split-dwarf -split-dwarf-file foo.dwo -S -emit-llvm -o - %s | FileCheck --check-prefix=VANILLA %s int main (void) { return 0; } // Testing to ensure that the dwo name gets output into the compile unit. // CHECK: !DICompileUnit({{.*}}, splitDebugFilename: "foo.dwo" + +// Testing to ensure that the dwo name is not output into the compile unit if +// it's for vanilla split-dwarf rather than split-dwarf for implicit modules. 
+// VANILLA-NOT: splitDebugFilename diff --git a/test/CodeGenCXX/invariant.group-for-vptrs.cpp b/test/CodeGenCXX/invariant.group-for-vptrs.cpp index 5c76353..4e5d9b4 100644 --- a/test/CodeGenCXX/invariant.group-for-vptrs.cpp +++ b/test/CodeGenCXX/invariant.group-for-vptrs.cpp @@ -12,15 +12,15 @@ struct D : A { void testExternallyVisible() { A *a = new A; - // CHECK: load {{.*}} !invariant.group ![[A_MD:[0-9]+]] + // CHECK: load {{.*}} !invariant.group ![[MD:[0-9]+]] a->foo(); D *d = new D; // CHECK: call void @_ZN1DC1Ev( - // CHECK: load {{.*}} !invariant.group ![[D_MD:[0-9]+]] + // CHECK: load {{.*}} !invariant.group ![[MD]] d->foo(); A *a2 = d; - // CHECK: load {{.*}} !invariant.group ![[A_MD]] + // CHECK: load {{.*}} !invariant.group ![[MD]] a2->foo(); } // CHECK-LABEL: {{^}}} @@ -40,35 +40,32 @@ struct C : B { // CHECK-LABEL: define void @_Z21testInternallyVisibleb( void testInternallyVisible(bool p) { B *b = new B; - // CHECK: = load {{.*}}, !invariant.group ![[B_MD:[0-9]+]] + // CHECK: = load {{.*}}, !invariant.group ![[MD]] b->bar(); // CHECK: call void @_ZN12_GLOBAL__N_11CC1Ev( C *c = new C; - // CHECK: = load {{.*}}, !invariant.group ![[C_MD:[0-9]+]] + // CHECK: = load {{.*}}, !invariant.group ![[MD]] c->bar(); } // Checking A::A() // CHECK-LABEL: define linkonce_odr void @_ZN1AC2Ev( -// CHECK: store {{.*}}, !invariant.group ![[A_MD]] +// CHECK: store {{.*}}, !invariant.group ![[MD]] // CHECK-LABEL: {{^}}} // Checking D::D() // CHECK-LABEL: define linkonce_odr void @_ZN1DC2Ev( // CHECK: = call i8* @llvm.invariant.group.barrier(i8* // CHECK: call void @_ZN1AC2Ev(%struct.A* -// CHECK: store {{.*}} !invariant.group ![[D_MD]] +// CHECK: store {{.*}} !invariant.group ![[MD]] // Checking B::B() // CHECK-LABEL: define internal void @_ZN12_GLOBAL__N_11BC2Ev( -// CHECK: store {{.*}}, !invariant.group ![[B_MD]] +// CHECK: store {{.*}}, !invariant.group ![[MD]] // Checking C::C() // CHECK-LABEL: define internal void @_ZN12_GLOBAL__N_11CC2Ev( -// CHECK: store {{.*}}, 
!invariant.group ![[C_MD]] +// CHECK: store {{.*}}, !invariant.group ![[MD]] -// CHECK: ![[A_MD]] = !{!"_ZTS1A"} -// CHECK: ![[D_MD]] = !{!"_ZTS1D"} -// CHECK: ![[B_MD]] = distinct !{} -// CHECK: ![[C_MD]] = distinct !{} +// CHECK: ![[MD]] = !{} diff --git a/test/CodeGenCXX/modules-ts.cppm b/test/CodeGenCXX/modules-ts.cppm deleted file mode 100644 index 90f6b53..0000000 --- a/test/CodeGenCXX/modules-ts.cppm +++ /dev/null @@ -1,23 +0,0 @@ -// RUN: %clang_cc1 -fmodules-ts -std=c++1z -triple=x86_64-linux-gnu -fmodules-codegen -emit-module-interface %s -o %t.pcm -// RUN: %clang_cc1 -fmodules-ts -std=c++1z -triple=x86_64-linux-gnu %t.pcm -emit-llvm -o - | FileCheck %s - -module FooBar; - -export { - // CHECK-LABEL: define i32 @_Z1fv( - int f() { return 0; } -} - -// CHECK-LABEL: define weak_odr void @_Z2f2v( -inline void f2() { } - -// FIXME: Emit global variables and their initializers with this TU. -// Emit an initialization function that other TUs can call, with guard variable. - -// FIXME: Mangle non-exported symbols so they don't collide with -// non-exported symbols from other modules? - -// FIXME: Formally-internal-linkage symbols that are used from an exported -// symbol need a mangled name and external linkage. - -// FIXME: const-qualified variables don't have implicit internal linkage when owned by a module. 
diff --git a/test/CodeGenCXX/ubsan-suppress-checks.cpp b/test/CodeGenCXX/ubsan-suppress-checks.cpp index d0e7b32..ae7c94b 100644 --- a/test/CodeGenCXX/ubsan-suppress-checks.cpp +++ b/test/CodeGenCXX/ubsan-suppress-checks.cpp @@ -12,6 +12,7 @@ void load_non_null_pointers() { char c = "foo"[0]; + // CHECK-NOT: and i64 {{.*}}, !nosanitize // CHECK-NOT: icmp ne {{.*}}, null, !nosanitize // CHECK: ret void } @@ -43,10 +44,6 @@ struct A { }; f(); - // LAMBDA: %[[LAMBDAINT:[0-9]+]] = ptrtoint %class.anon* %[[FUNCVAR:.*]] to i64, !nosanitize - // LAMBDA: and i64 %[[LAMBDAINT]], 7, !nosanitize - // LAMBDA: call void @__ubsan_handle_type_mismatch - // LAMBDA-NOT: call void @__ubsan_handle_type_mismatch // LAMBDA: ret void } diff --git a/test/CoverageMapping/empty-destructor.cpp b/test/CoverageMapping/empty-destructor.cpp new file mode 100644 index 0000000..cfc29a7 --- /dev/null +++ b/test/CoverageMapping/empty-destructor.cpp @@ -0,0 +1,11 @@ +// RUN: %clang_cc1 -triple i686-windows -emit-llvm-only -fcoverage-mapping -dump-coverage-mapping -fprofile-instrument=clang %s | FileCheck %s + +struct A { + virtual ~A(); +}; + +// CHECK: ?PR32761@@YAXXZ: +// CHECK-NEXT: File 0, [[@LINE+1]]:16 -> [[@LINE+3]]:2 = #0 +void PR32761() { + A a; +} diff --git a/test/Driver/Inputs/opensuse_tumbleweed_armv6hl_tree/usr/lib/crt1.o b/test/Driver/Inputs/opensuse_tumbleweed_armv6hl_tree/usr/lib/crt1.o new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/test/Driver/Inputs/opensuse_tumbleweed_armv6hl_tree/usr/lib/crt1.o diff --git a/test/Driver/Inputs/opensuse_tumbleweed_armv6hl_tree/usr/lib/crti.o b/test/Driver/Inputs/opensuse_tumbleweed_armv6hl_tree/usr/lib/crti.o new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/test/Driver/Inputs/opensuse_tumbleweed_armv6hl_tree/usr/lib/crti.o diff --git a/test/Driver/Inputs/opensuse_tumbleweed_armv6hl_tree/usr/lib/crtn.o b/test/Driver/Inputs/opensuse_tumbleweed_armv6hl_tree/usr/lib/crtn.o new file mode 100644 index 0000000..e69de29 --- 
/dev/null +++ b/test/Driver/Inputs/opensuse_tumbleweed_armv6hl_tree/usr/lib/crtn.o diff --git a/test/Driver/Inputs/opensuse_tumbleweed_armv6hl_tree/usr/lib/gcc/armv6hl-suse-linux-gnueabi/5/crtbegin.o b/test/Driver/Inputs/opensuse_tumbleweed_armv6hl_tree/usr/lib/gcc/armv6hl-suse-linux-gnueabi/5/crtbegin.o new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/test/Driver/Inputs/opensuse_tumbleweed_armv6hl_tree/usr/lib/gcc/armv6hl-suse-linux-gnueabi/5/crtbegin.o diff --git a/test/Driver/Inputs/opensuse_tumbleweed_armv6hl_tree/usr/lib/gcc/armv6hl-suse-linux-gnueabi/5/crtend.o b/test/Driver/Inputs/opensuse_tumbleweed_armv6hl_tree/usr/lib/gcc/armv6hl-suse-linux-gnueabi/5/crtend.o new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/test/Driver/Inputs/opensuse_tumbleweed_armv6hl_tree/usr/lib/gcc/armv6hl-suse-linux-gnueabi/5/crtend.o diff --git a/test/Driver/Inputs/opensuse_tumbleweed_armv7hl_tree/usr/lib/crt1.o b/test/Driver/Inputs/opensuse_tumbleweed_armv7hl_tree/usr/lib/crt1.o new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/test/Driver/Inputs/opensuse_tumbleweed_armv7hl_tree/usr/lib/crt1.o diff --git a/test/Driver/Inputs/opensuse_tumbleweed_armv7hl_tree/usr/lib/crti.o b/test/Driver/Inputs/opensuse_tumbleweed_armv7hl_tree/usr/lib/crti.o new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/test/Driver/Inputs/opensuse_tumbleweed_armv7hl_tree/usr/lib/crti.o diff --git a/test/Driver/Inputs/opensuse_tumbleweed_armv7hl_tree/usr/lib/crtn.o b/test/Driver/Inputs/opensuse_tumbleweed_armv7hl_tree/usr/lib/crtn.o new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/test/Driver/Inputs/opensuse_tumbleweed_armv7hl_tree/usr/lib/crtn.o diff --git a/test/Driver/Inputs/opensuse_tumbleweed_armv7hl_tree/usr/lib/gcc/armv7hl-suse-linux-gnueabi/5/crtbegin.o b/test/Driver/Inputs/opensuse_tumbleweed_armv7hl_tree/usr/lib/gcc/armv7hl-suse-linux-gnueabi/5/crtbegin.o new file mode 100644 index 0000000..e69de29 --- /dev/null +++ 
b/test/Driver/Inputs/opensuse_tumbleweed_armv7hl_tree/usr/lib/gcc/armv7hl-suse-linux-gnueabi/5/crtbegin.o diff --git a/test/Driver/Inputs/opensuse_tumbleweed_armv7hl_tree/usr/lib/gcc/armv7hl-suse-linux-gnueabi/5/crtend.o b/test/Driver/Inputs/opensuse_tumbleweed_armv7hl_tree/usr/lib/gcc/armv7hl-suse-linux-gnueabi/5/crtend.o new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/test/Driver/Inputs/opensuse_tumbleweed_armv7hl_tree/usr/lib/gcc/armv7hl-suse-linux-gnueabi/5/crtend.o diff --git a/test/Driver/fsanitize.c b/test/Driver/fsanitize.c index b6304b5..05e239c 100644 --- a/test/Driver/fsanitize.c +++ b/test/Driver/fsanitize.c @@ -358,6 +358,27 @@ // RUN: %clang -target i386-pc-openbsd -fsanitize=address %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-ASAN-OPENBSD // CHECK-ASAN-OPENBSD: unsupported option '-fsanitize=address' for target 'i386-pc-openbsd' +// RUN: %clang -target x86_64-apple-darwin -fsanitize=leak %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LSAN-X86-64-DARWIN +// CHECK-LSAN-X86-64-DARWIN-NOT: unsupported option + +// RUN: %clang -target x86_64-apple-iossimulator -fsanitize=leak %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LSAN-X86-64-IOSSIMULATOR +// CHECK-LSAN-X86-64-IOSSIMULATOR-NOT: unsupported option + +// RUN: %clang -target x86_64-apple-tvossimulator -fsanitize=leak %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LSAN-X86-64-TVOSSIMULATOR +// CHECK-LSAN-X86-64-TVOSSIMULATOR-NOT: unsupported option + +// RUN: %clang -target i386-apple-darwin -fsanitize=leak %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LSAN-I386-DARWIN +// CHECK-LSAN-I386-DARWIN-NOT: unsupported option + +// RUN: %clang -target arm-apple-ios -fsanitize=leak %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LSAN-ARM-IOS +// CHECK-LSAN-ARM-IOS-NOT: unsupported option + +// RUN: %clang -target i386-apple-iossimulator -fsanitize=leak %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LSAN-I386-IOSSIMULATOR +// CHECK-LSAN-I386-IOSSIMULATOR-NOT: unsupported option 
+ +// RUN: %clang -target i386-apple-tvossimulator -fsanitize=leak %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LSAN-I386-TVOSSIMULATOR +// CHECK-LSAN-I386-TVOSSIMULATOR-NOT: unsupported option + // RUN: %clang -target i686-linux-gnu -fsanitize=efficiency-cache-frag %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-ESAN-X86 // RUN: %clang -target i686-linux-gnu -fsanitize=efficiency-working-set %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-ESAN-X86 // CHECK-ESAN-X86: error: unsupported option '-fsanitize=efficiency-{{.*}}' for target 'i686--linux-gnu' diff --git a/test/Driver/fuzzer.c b/test/Driver/fuzzer.c new file mode 100644 index 0000000..e534a73 --- /dev/null +++ b/test/Driver/fuzzer.c @@ -0,0 +1,20 @@ +// Test flags inserted by -fsanitize=fuzzer. + +// RUN: %clang -fsanitize=fuzzer %s -target x86_64-apple-darwin14 -### 2>&1 | FileCheck --check-prefixes=CHECK-FUZZER-LIB,CHECK-COVERAGE-FLAGS %s +// +// CHECK-FUZZER-LIB: libLLVMFuzzer.a +// CHECK-COVERAGE: -fsanitize-coverage-trace-pc-guard +// CHECK-COVERAGE-SAME: -fsanitize-coverage-indirect-calls +// CHECK-COVERAGE-SAME: -fsanitize-coverage-trace-cmp + +// RUN: %clang -fsanitize=fuzzer -target i386-unknown-linux -stdlib=platform %s -### 2>&1 | FileCheck --check-prefixes=CHECK-LIBCXX-LINUX %s +// +// CHECK-LIBCXX-LINUX: -lstdc++ + +// RUN: %clang -target x86_64-apple-darwin14 -fsanitize=fuzzer %s -### 2>&1 | FileCheck --check-prefixes=CHECK-LIBCXX-DARWIN %s +// +// CHECK-LIBCXX-DARWIN: -lc++ + +int LLVMFuzzerTestOneInput(const char *Data, long Size) { + return 0; +} diff --git a/test/Driver/hexagon-toolchain-elf.c b/test/Driver/hexagon-toolchain-elf.c index f6f2191..9858245 100644 --- a/test/Driver/hexagon-toolchain-elf.c +++ b/test/Driver/hexagon-toolchain-elf.c @@ -97,6 +97,22 @@ // CHECK024: "-cc1" {{.*}} "-target-cpu" "hexagonv62" // CHECK024: hexagon-link{{.*}}/Inputs/hexagon_tree/Tools/bin/../target/hexagon/lib/v62/crt0 +// RUN: %clang -### -target hexagon-unknown-elf \ +// RUN: -ccc-install-dir 
%S/Inputs/hexagon_tree/Tools/bin \ +// RUN: -O3 \ +// RUN: %s 2>&1 \ +// RUN: | FileCheck -check-prefix=CHECK025 %s +// CHECK025: "-ffp-contract=fast" +// CHECK025: hexagon-link + +// RUN: %clang -### -target hexagon-unknown-elf \ +// RUN: -ccc-install-dir %S/Inputs/hexagon_tree/Tools/bin \ +// RUN: -O3 -ffp-contract=off \ +// RUN: %s 2>&1 \ +// RUN: | FileCheck -check-prefix=CHECK026 %s +// CHECK026-NOT: "-ffp-contract=fast" +// CHECK026: hexagon-link + // ----------------------------------------------------------------------------- // Test Linker related args // ----------------------------------------------------------------------------- diff --git a/test/Driver/linux-ld.c b/test/Driver/linux-ld.c index e5aa870..1c5f1a4 100644 --- a/test/Driver/linux-ld.c +++ b/test/Driver/linux-ld.c @@ -638,6 +638,46 @@ // CHECK-OPENSUSE-42-2-AARCH64: "{{.*}}/usr/lib64/gcc/aarch64-suse-linux/4.8{{/|\\\\}}crtend.o" // CHECK-OPENSUSE-42-2-AARCH64: "{{.*}}/usr/lib64/gcc/aarch64-suse-linux/4.8/../../../../lib64{{/|\\\\}}crtn.o" // +// Check openSUSE Tumbleweed on armv6hl +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: --target=armv6hl-suse-linux-gnueabi \ +// RUN: --gcc-toolchain="" \ +// RUN: --sysroot=%S/Inputs/opensuse_tumbleweed_armv6hl_tree \ +// RUN: | FileCheck --check-prefix=CHECK-OPENSUSE-TW-ARMV6HL %s +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: --target=armv6hl-suse-linux-gnueabi \ +// RUN: --gcc-toolchain="" \ +// RUN: --sysroot=%S/Inputs/opensuse_tumbleweed_armv6hl_tree \ +// RUN: | FileCheck --check-prefix=CHECK-OPENSUSE-TW-ARMV6HL %s +// CHECK-OPENSUSE-TW-ARMV6HL: "{{.*}}ld{{(.exe)?}}" "--sysroot=[[SYSROOT:[^"]+]]" +// CHECK-OPENSUSE-TW-ARMV6HL: "{{.*}}/usr/lib/gcc/armv6hl-suse-linux-gnueabi/5/../../../../lib{{/|\\\\}}crt1.o" +// CHECK-OPENSUSE-TW-ARMV6HL: "{{.*}}/usr/lib/gcc/armv6hl-suse-linux-gnueabi/5/../../../../lib{{/|\\\\}}crti.o" +// CHECK-OPENSUSE-TW-ARMV6HL: 
"{{.*}}/usr/lib/gcc/armv6hl-suse-linux-gnueabi/5{{/|\\\\}}crtbegin.o" +// CHECK-OPENSUSE-TW-ARMV6HL: "-L[[SYSROOT]]/usr/lib/gcc/armv6hl-suse-linux-gnueabi/5" +// CHECK-OPENSUSE-TW-ARMV6HL: "-L[[SYSROOT]]/usr/lib/gcc/armv6hl-suse-linux-gnueabi/5/../../../../lib" +// CHECK-OPENSUSE-TW-ARMV6HL: "{{.*}}/usr/lib/gcc/armv6hl-suse-linux-gnueabi/5{{/|\\\\}}crtend.o" +// CHECK-OPENSUSE-TW-ARMV6HL: "{{.*}}/usr/lib/gcc/armv6hl-suse-linux-gnueabi/5/../../../../lib{{/|\\\\}}crtn.o" +// +// Check openSUSE Tumbleweed on armv7hl +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: --target=armv7hl-suse-linux-gnueabi \ +// RUN: --gcc-toolchain="" \ +// RUN: --sysroot=%S/Inputs/opensuse_tumbleweed_armv7hl_tree \ +// RUN: | FileCheck --check-prefix=CHECK-OPENSUSE-TW-ARMV7HL %s +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: --target=armv7hl-suse-linux-gnueabi \ +// RUN: --gcc-toolchain="" \ +// RUN: --sysroot=%S/Inputs/opensuse_tumbleweed_armv7hl_tree \ +// RUN: | FileCheck --check-prefix=CHECK-OPENSUSE-TW-ARMV7HL %s +// CHECK-OPENSUSE-TW-ARMV7HL: "{{.*}}ld{{(.exe)?}}" "--sysroot=[[SYSROOT:[^"]+]]" +// CHECK-OPENSUSE-TW-ARMV7HL: "{{.*}}/usr/lib/gcc/armv7hl-suse-linux-gnueabi/5/../../../../lib{{/|\\\\}}crt1.o" +// CHECK-OPENSUSE-TW-ARMV7HL: "{{.*}}/usr/lib/gcc/armv7hl-suse-linux-gnueabi/5/../../../../lib{{/|\\\\}}crti.o" +// CHECK-OPENSUSE-TW-ARMV7HL: "{{.*}}/usr/lib/gcc/armv7hl-suse-linux-gnueabi/5{{/|\\\\}}crtbegin.o" +// CHECK-OPENSUSE-TW-ARMV7HL: "-L[[SYSROOT]]/usr/lib/gcc/armv7hl-suse-linux-gnueabi/5" +// CHECK-OPENSUSE-TW-ARMV7HL: "-L[[SYSROOT]]/usr/lib/gcc/armv7hl-suse-linux-gnueabi/5/../../../../lib" +// CHECK-OPENSUSE-TW-ARMV7HL: "{{.*}}/usr/lib/gcc/armv7hl-suse-linux-gnueabi/5{{/|\\\\}}crtend.o" +// CHECK-OPENSUSE-TW-ARMV7HL: "{{.*}}/usr/lib/gcc/armv7hl-suse-linux-gnueabi/5/../../../../lib{{/|\\\\}}crtn.o" +// // Check dynamic-linker for different archs // RUN: %clang %s -### -o %t.o 2>&1 \ // RUN: --target=arm-linux-gnueabi \ diff 
--git a/test/Driver/modules-ts.cpp b/test/Driver/modules-ts.cpp index fd33914..3847b71 100644 --- a/test/Driver/modules-ts.cpp +++ b/test/Driver/modules-ts.cpp @@ -1,6 +1,6 @@ // Check compiling a module interface to a .pcm file. // -// RUN: %clang -fmodules-ts -x c++-module --precompile %s -Dimplementation= -o %t.pcm -v 2>&1 | FileCheck %s --check-prefix=CHECK-PRECOMPILE +// RUN: %clang -fmodules-ts -x c++-module --precompile %s -o %t.pcm -v 2>&1 | FileCheck %s --check-prefix=CHECK-PRECOMPILE // // CHECK-PRECOMPILE: -cc1 {{.*}} -emit-module-interface // CHECK-PRECOMPILE-SAME: -o {{.*}}.pcm @@ -18,7 +18,7 @@ // Check use of a .pcm file in another compilation. // -// RUN: %clang -fmodules-ts -fmodule-file=%t.pcm %s -c -o %t.o -v 2>&1 | FileCheck %s --check-prefix=CHECK-USE +// RUN: %clang -fmodules-ts -fmodule-file=%t.pcm -Dexport= %s -c -o %t.o -v 2>&1 | FileCheck %s --check-prefix=CHECK-USE // // CHECK-USE: -cc1 // CHECK-USE-SAME: -emit-obj @@ -28,11 +28,11 @@ // Check combining precompile and compile steps works. // -// RUN: %clang -fmodules-ts -x c++-module %s -Dimplementation= -c -o %t.pcm.o -v 2>&1 | FileCheck %s --check-prefix=CHECK-PRECOMPILE --check-prefix=CHECK-COMPILE +// RUN: %clang -fmodules-ts -x c++-module %s -c -o %t.pcm.o -v 2>&1 | FileCheck %s --check-prefix=CHECK-PRECOMPILE --check-prefix=CHECK-COMPILE // Check that .cppm is treated as a module implicitly. // RUN: cp %s %t.cppm -// RUN: %clang -fmodules-ts --precompile %t.cppm -Dimplementation= -o %t.pcm -v 2>&1 | FileCheck %s --check-prefix=CHECK-PRECOMPILE +// RUN: %clang -fmodules-ts --precompile %t.cppm -o %t.pcm -v 2>&1 | FileCheck %s --check-prefix=CHECK-PRECOMPILE -// Note, we use -Dimplementation= to make this a valid module interface unit when building the interface. -module implementation foo; +// Note, we use -Dexport= to make this a module implementation unit when building the implementation. 
+export module foo; diff --git a/test/Driver/sanitizer-ld.c b/test/Driver/sanitizer-ld.c index c4a6f43..efdd124 100644 --- a/test/Driver/sanitizer-ld.c +++ b/test/Driver/sanitizer-ld.c @@ -409,6 +409,15 @@ // CHECK-ASAN-DARWIN106-CXX: libclang_rt.asan_osx_dynamic.dylib // CHECK-ASAN-DARWIN106-CXX-NOT: -lc++abi +// RUN: %clangxx -fsanitize=leak %s -### -o %t.o 2>&1 \ +// RUN: -mmacosx-version-min=10.6 \ +// RUN: -target x86_64-apple-darwin13.4.0 -fuse-ld=ld -stdlib=platform \ +// RUN: --sysroot=%S/Inputs/basic_linux_tree \ +// RUN: | FileCheck --check-prefix=CHECK-LSAN-DARWIN106-CXX %s +// CHECK-LSAN-DARWIN106-CXX: "{{.*}}ld{{(.exe)?}}" +// CHECK-LSAN-DARWIN106-CXX: libclang_rt.lsan_osx_dynamic.dylib +// CHECK-LSAN-DARWIN106-CXX-NOT: -lc++abi + // RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ // RUN: -target x86_64-unknown-linux -fuse-ld=ld -fsanitize=safe-stack \ // RUN: --sysroot=%S/Inputs/basic_linux_tree \ diff --git a/test/Driver/split-debug.c b/test/Driver/split-debug.c index eaa1e99..c12164b 100644 --- a/test/Driver/split-debug.c +++ b/test/Driver/split-debug.c @@ -36,53 +36,53 @@ // RUN: %clang -target x86_64-unknown-linux-gnu -gsplit-dwarf -gmlt -fno-split-dwarf-inlining -S -### %s 2> %t // RUN: FileCheck -check-prefix=CHECK-GMLT-WITH-SPLIT < %t %s // -// CHECK-GMLT-WITH-SPLIT: "-split-dwarf=Enable" +// CHECK-GMLT-WITH-SPLIT: "-enable-split-dwarf" // CHECK-GMLT-WITH-SPLIT: "-debug-info-kind=line-tables-only" // CHECK-GMLT-WITH-SPLIT: "-split-dwarf-file" // RUN: %clang -target x86_64-unknown-linux-gnu -gmlt -gsplit-dwarf -fno-split-dwarf-inlining -S -### %s 2> %t // RUN: FileCheck -check-prefix=CHECK-SPLIT-WITH-GMLT < %t %s // -// CHECK-SPLIT-WITH-GMLT: "-split-dwarf=Enable" +// CHECK-SPLIT-WITH-GMLT: "-enable-split-dwarf" // CHECK-SPLIT-WITH-GMLT: "-debug-info-kind=line-tables-only" // CHECK-SPLIT-WITH-GMLT: "-split-dwarf-file" // RUN: %clang -target x86_64-unknown-linux-gnu -gsplit-dwarf -fno-split-dwarf-inlining -S -### %s 2> %t // RUN: 
FileCheck -check-prefix=CHECK-SPLIT-WITH-NOINL < %t %s // -// CHECK-SPLIT-WITH-NOINL: "-split-dwarf=Enable" +// CHECK-SPLIT-WITH-NOINL: "-enable-split-dwarf" // CHECK-SPLIT-WITH-NOINL: "-debug-info-kind=limited" // CHECK-SPLIT-WITH-NOINL: "-split-dwarf-file" // RUN: %clang -target x86_64-unknown-linux-gnu -gsplit-dwarf -gmlt -S -### %s 2> %t // RUN: FileCheck -check-prefix=CHECK-GMLT-OVER-SPLIT < %t %s // -// CHECK-GMLT-OVER-SPLIT-NOT: "-split-dwarf=Enable" +// CHECK-GMLT-OVER-SPLIT-NOT: "-enable-split-dwarf" // CHECK-GMLT-OVER-SPLIT: "-debug-info-kind=line-tables-only" // CHECK-GMLT-OVER-SPLIT-NOT: "-split-dwarf-file" // RUN: %clang -target x86_64-unknown-linux-gnu -gmlt -gsplit-dwarf -S -### %s 2> %t // RUN: FileCheck -check-prefix=CHECK-SPLIT-OVER-GMLT < %t %s // -// CHECK-SPLIT-OVER-GMLT: "-split-dwarf=Enable" "-debug-info-kind=limited" +// CHECK-SPLIT-OVER-GMLT: "-enable-split-dwarf" "-debug-info-kind=limited" // CHECK-SPLIT-OVER-GMLT: "-split-dwarf-file" // RUN: %clang -target x86_64-unknown-linux-gnu -gsplit-dwarf -g0 -fno-split-dwarf-inlining -S -### %s 2> %t // RUN: FileCheck -check-prefix=CHECK-G0-OVER-SPLIT < %t %s // -// CHECK-G0-OVER-SPLIT-NOT: "-split-dwarf=Enable" +// CHECK-G0-OVER-SPLIT-NOT: "-enable-split-dwarf" // CHECK-G0-OVER-SPLIT-NOT: "-debug-info-kind // CHECK-G0-OVER-SPLIT-NOT: "-split-dwarf-file" // RUN: %clang -target x86_64-unknown-linux-gnu -gsplit-dwarf -g0 -S -### %s 2> %t // RUN: FileCheck -check-prefix=CHECK-G0-OVER-SPLIT < %t %s // -// CHECK-G0-OVER-SPLIT-NOT: "-split-dwarf=Enable" +// CHECK-G0-OVER-SPLIT-NOT: "-enable-split-dwarf" // CHECK-G0-OVER-SPLIT-NOT: "-debug-info-kind // CHECK-G0-OVER-SPLIT-NOT: "-split-dwarf-file" // RUN: %clang -target x86_64-unknown-linux-gnu -g0 -gsplit-dwarf -S -### %s 2> %t // RUN: FileCheck -check-prefix=CHECK-SPLIT-OVER-G0 < %t %s // -// CHECK-SPLIT-OVER-G0: "-split-dwarf=Enable" "-debug-info-kind=limited" +// CHECK-SPLIT-OVER-G0: "-enable-split-dwarf" "-debug-info-kind=limited" // 
CHECK-SPLIT-OVER-G0: "-split-dwarf-file" diff --git a/test/Format/incomplete.cpp b/test/Format/incomplete.cpp index 8a65fad..f92d576 100644 --- a/test/Format/incomplete.cpp +++ b/test/Format/incomplete.cpp @@ -1,6 +1,6 @@ // RUN: grep -Ev "// *[A-Z-]+:" %s | clang-format -style=LLVM -cursor=0 \ // RUN: | FileCheck -strict-whitespace %s -// CHECK: {{"IncompleteFormat": true}} +// CHECK: {{"IncompleteFormat": true, "Line": 2}} // CHECK: {{^int\ \i;$}} int i; // CHECK: {{^f \( g \(;$}} diff --git a/test/Headers/stdatomic.c b/test/Headers/stdatomic.c index 76112cf..3643fd4 100644 --- a/test/Headers/stdatomic.c +++ b/test/Headers/stdatomic.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c11 -E %s | FileCheck %s +// RUN: %clang_cc1 -std=c11 -fms-compatibility -E %s | FileCheck %s #include int bool_lock_free = ATOMIC_BOOL_LOCK_FREE; diff --git a/test/Index/Core/external-source-symbol-attr.m b/test/Index/Core/external-source-symbol-attr.m new file mode 100644 index 0000000..49a075f --- /dev/null +++ b/test/Index/Core/external-source-symbol-attr.m @@ -0,0 +1,100 @@ +// RUN: c-index-test core -print-source-symbols -- %s -target x86_64-apple-macosx10.7 | FileCheck %s + +#define EXT_DECL(mod_name) __attribute__((external_source_symbol(language="Swift", defined_in=mod_name))) +#define GEN_DECL(mod_name) __attribute__((external_source_symbol(language="Swift", defined_in=mod_name, generated_declaration))) +#define PUSH_GEN_DECL(mod_name) push(GEN_DECL(mod_name), apply_to=any(enum, objc_interface, objc_category, objc_protocol)) + +// This should not be indexed. 
+GEN_DECL("some_module") +@interface I1 +// CHECK-NOT: [[@LINE-1]]:12 | +-(void)method; +// CHECK-NOT: [[@LINE-1]]:8 | +@end + +EXT_DECL("some_module") +@interface I2 +// CHECK: [[@LINE-1]]:12 | class/Swift | I2 | c:@M@some_module@objc(cs)I2 | {{.*}} | Decl | rel: 0 +-(void)method; +// CHECK: [[@LINE-1]]:8 | instance-method/Swift | method | c:@M@some_module@objc(cs)I2(im)method | -[I2 method] | Decl,Dyn,RelChild | rel: 1 +@end + +void test1(I1 *o) { +// CHECK: [[@LINE-1]]:12 | class/Swift | I1 | c:@M@some_module@objc(cs)I1 | + [o method]; + // CHECK: [[@LINE-1]]:6 | instance-method/Swift | method | c:@M@some_module@objc(cs)I1(im)method | +} + +EXT_DECL("some_module") +@protocol ExtProt +// CHECK: [[@LINE-1]]:11 | protocol/Swift | ExtProt | c:@M@some_module@objc(pl)ExtProt | +@end + +@interface I1(cat) +// CHECK: [[@LINE-1]]:15 | extension/ObjC | cat | c:@M@some_module@objc(cy)I1@cat | +-(void)cat_method; +// CHECK: [[@LINE-1]]:8 | instance-method/ObjC | cat_method | c:@M@some_module@objc(cs)I1(im)cat_method +@end + +EXT_DECL("cat_module") +@interface I1(cat2) +// CHECK: [[@LINE-1]]:15 | extension/Swift | cat2 | c:@CM@cat_module@some_module@objc(cy)I1@cat2 | +-(void)cat_method2; +// CHECK: [[@LINE-1]]:8 | instance-method/Swift | cat_method2 | c:@CM@cat_module@some_module@objc(cs)I1(im)cat_method2 +@end + +#define NS_ENUM(_name, _type) enum _name:_type _name; enum _name : _type + +#pragma clang attribute PUSH_GEN_DECL("modname") + +@interface I3 +// CHECK-NOT: [[@LINE-1]]:12 | +-(void)meth; +// CHECK-NOT: [[@LINE-1]]:8 | +@end + +@interface I3(cat) +// CHECK-NOT: [[@LINE-1]]:12 | +// CHECK-NOT: [[@LINE-2]]:15 | +-(void)meth2; +// CHECK-NOT: [[@LINE-1]]:8 | +@end + +@protocol ExtProt2 +// CHECK-NOT: [[@LINE-1]]:11 | +-(void)meth; +// CHECK-NOT: [[@LINE-1]]:8 | +@end + +typedef NS_ENUM(SomeEnum, int) { +// CHECK-NOT: [[@LINE-1]]:17 | + SomeEnumFirst = 0, + // CHECK-NOT: [[@LINE-1]]:3 | +}; + +#pragma clang attribute pop + +void test2(I3 *i3, id prot2, SomeEnum some) { 
+ // CHECK: [[@LINE-1]]:12 | class/Swift | I3 | c:@M@modname@objc(cs)I3 | + // CHECK: [[@LINE-2]]:23 | protocol/Swift | ExtProt2 | c:@M@modname@objc(pl)ExtProt2 | + // CHECK: [[@LINE-3]]:40 | enum/Swift | SomeEnum | c:@M@modname@E@SomeEnum | + [i3 meth]; + // CHECK: [[@LINE-1]]:7 | instance-method/Swift | meth | c:@M@modname@objc(cs)I3(im)meth | + [i3 meth2]; + // CHECK: [[@LINE-1]]:7 | instance-method/Swift | meth2 | c:@CM@modname@objc(cs)I3(im)meth2 | + [prot2 meth]; + // CHECK: [[@LINE-1]]:10 | instance-method/Swift | meth | c:@M@modname@objc(pl)ExtProt2(im)meth | + some = SomeEnumFirst; + // CHECK: [[@LINE-1]]:10 | enumerator/Swift | SomeEnumFirst | c:@M@modname@E@SomeEnum@SomeEnumFirst | +} + +#pragma clang attribute PUSH_GEN_DECL("other_mod_for_cat") +@interface I3(cat_other_mod) +-(void)meth_other_mod; +@end +#pragma clang attribute pop + +void test3(I3 *i3) { + [i3 meth_other_mod]; + // CHECK: [[@LINE-1]]:7 | instance-method/Swift | meth_other_mod | c:@CM@other_mod_for_cat@modname@objc(cs)I3(im)meth_other_mod | +} diff --git a/test/Index/Core/index-source.cpp b/test/Index/Core/index-source.cpp index 5d4a1be..7bb366c 100644 --- a/test/Index/Core/index-source.cpp +++ b/test/Index/Core/index-source.cpp @@ -111,3 +111,180 @@ template<> class PartialSpecilizationClass { }; // CHECK: [[@LINE-1]]:7 | class(Gen,TS)/C++ | PartialSpecilizationClass | c:@S@PartialSpecilizationClass>#I#I | | Def,RelSpecialization | rel: 1 // CHECK-NEXT: RelSpecialization | PartialSpecilizationClass | c:@ST>2#T#T@PartialSpecilizationClass + +template +class PseudoOverridesInSpecializations { + void function() { } + void function(int) { } + + static void staticFunction() { } + + int field; + static int variable; + + typedef T TypeDef; + using TypeAlias = T; + + enum anEnum { }; + + struct Struct { }; + union Union { }; + + using TypealiasOrRecord = void; + + template struct InnerTemplate { }; + template struct InnerTemplate { }; +}; + +template<> +class PseudoOverridesInSpecializations { 
+ void function() { } +// CHECK: [[@LINE-1]]:8 | instance-method/C++ | function | c:@S@PseudoOverridesInSpecializations>#d#I@F@function# | __ZN32PseudoOverridesInSpecializationsIdiE8functionEv | Def,RelChild,RelOver,RelSpecialization | rel: 2 +// CHECK-NEXT: RelChild +// CHECK-NEXT: RelOver,RelSpecialization | function | c:@ST>2#T#T@PseudoOverridesInSpecializations@F@function# + + void staticFunction() { } +// CHECK: [[@LINE-1]]:8 | instance-method/C++ | staticFunction | c:@S@PseudoOverridesInSpecializations>#d#I@F@staticFunction# | __ZN32PseudoOverridesInSpecializationsIdiE14staticFunctionEv | Def,RelChild | rel: 1 +// CHECK-NOT: RelOver + + int notOverridingField = 0; + +// CHECK-LABEL: checLabelBreak + int checLabelBreak = 0; + + int field = 0; +// CHECK: [[@LINE-1]]:7 | field/C++ | field | c:@S@PseudoOverridesInSpecializations>#d#I@FI@field | | Def,RelChild,RelOver,RelSpecialization | rel: 2 +// CHECK-NEXT: RelChild +// CHECK-NEXT: RelOver,RelSpecialization | field | c:@ST>2#T#T@PseudoOverridesInSpecializations@FI@field + + static double variable; +// CHECK: [[@LINE-1]]:17 | static-property/C++ | variable | c:@S@PseudoOverridesInSpecializations>#d#I@variable | __ZN32PseudoOverridesInSpecializationsIdiE8variableE | Decl,RelChild,RelOver,RelSpecialization | rel: 2 +// CHECK-NEXT: RelChild +// CHECK-NEXT: RelOver,RelSpecialization | variable | c:@ST>2#T#T@PseudoOverridesInSpecializations@variable + + typedef double TypeDef; +// CHECK: [[@LINE-1]]:18 | type-alias/C | TypeDef | c:index-source.cpp@S@PseudoOverridesInSpecializations>#d#I@T@TypeDef | | Def,RelChild,RelOver,RelSpecialization | rel: 2 +// CHECK-NEXT: RelChild +// CHECK-NEXT: RelOver,RelSpecialization | TypeDef | c:index-source.cpp@ST>2#T#T@PseudoOverridesInSpecializations@T@TypeDef + + using TypeAlias = int; +// CHECK: [[@LINE-1]]:9 | type-alias/C++ | TypeAlias | c:@S@PseudoOverridesInSpecializations>#d#I@TypeAlias | | Def,RelChild,RelOver,RelSpecialization | rel: 2 +// CHECK-NEXT: RelChild +// 
CHECK-NEXT: RelOver,RelSpecialization | TypeAlias | c:@ST>2#T#T@PseudoOverridesInSpecializations@TypeAlias + + enum anEnum { }; +// CHECK: [[@LINE-1]]:8 | enum/C | anEnum | c:@S@PseudoOverridesInSpecializations>#d#I@E@anEnum | | Def,RelChild,RelOver,RelSpecialization | rel: 2 +// CHECK-NEXT: RelChild +// CHECK-NEXT: RelOver,RelSpecialization | anEnum | c:@ST>2#T#T@PseudoOverridesInSpecializations@E@anEnum + class Struct { }; +// CHECK: [[@LINE-1]]:9 | class/C++ | Struct | c:@S@PseudoOverridesInSpecializations>#d#I@S@Struct | | Def,RelChild,RelOver,RelSpecialization | rel: 2 +// CHECK-NEXT: RelChild +// CHECK-NEXT: RelOver,RelSpecialization | Struct | c:@ST>2#T#T@PseudoOverridesInSpecializations@S@Struct + union Union { }; +// CHECK: [[@LINE-1]]:9 | union/C | Union | c:@S@PseudoOverridesInSpecializations>#d#I@U@Union | | Def,RelChild,RelOver,RelSpecialization | rel: 2 +// CHECK-NEXT: RelChild +// CHECK-NEXT: RelOver,RelSpecialization | Union | c:@ST>2#T#T@PseudoOverridesInSpecializations@U@Union + + struct TypealiasOrRecord { }; +// CHECK: [[@LINE-1]]:10 | struct/C | TypealiasOrRecord | c:@S@PseudoOverridesInSpecializations>#d#I@S@TypealiasOrRecord | | Def,RelChild,RelOver,RelSpecialization | rel: 2 +// CHECK-NEXT: RelChild +// CHECK-NEXT: RelOver,RelSpecialization | TypealiasOrRecord | c:@ST>2#T#T@PseudoOverridesInSpecializations@TypealiasOrRecord + + template struct InnerTemplate { }; +// CHECK: [[@LINE-1]]:31 | struct(Gen)/C++ | InnerTemplate | c:@S@PseudoOverridesInSpecializations>#d#I@ST>1#T@InnerTemplate | | Def,RelChild,RelOver,RelSpecialization | rel: 2 +// CHECK-NEXT: RelChild +// CHECK-NEXT: RelOver,RelSpecialization | InnerTemplate | c:@ST>2#T#T@PseudoOverridesInSpecializations@ST>1#T@InnerTemplate + template struct InnerTemplate { }; +// CHECK-NOT: RelOver +}; + +template +class PseudoOverridesInSpecializations { + typedef float TypealiasOrRecord; +// CHECK: [[@LINE-1]]:17 | type-alias/C | TypealiasOrRecord | 
c:index-source.cpp@SP>1#T@PseudoOverridesInSpecializations>#f#t0.0@T@TypealiasOrRecord | | Def,RelChild,RelOver,RelSpecialization | rel: 2 +// CHECK-NEXT: RelChild +// CHECK-NEXT: RelOver,RelSpecialization | TypealiasOrRecord | c:@ST>2#T#T@PseudoOverridesInSpecializations@TypealiasOrRecord +}; + +template +class ConflictingPseudoOverridesInSpecialization { + void foo(T x); + void foo(U x); +}; + +template +class ConflictingPseudoOverridesInSpecialization { + void foo(T x); +// CHECK: [[@LINE-1]]:8 | instance-method/C++ | foo | c:@SP>1#T@ConflictingPseudoOverridesInSpecialization>#I#t0.0@F@foo#S0_# | | Decl,RelChild,RelOver,RelSpecialization | rel: 3 +// CHECK-NEXT: RelChild +// CHECK-NEXT: RelOver,RelSpecialization | foo | c:@ST>2#T#T@ConflictingPseudoOverridesInSpecialization@F@foo#t0.0# +// CHECK-NEXT: RelOver,RelSpecialization | foo | c:@ST>2#T#T@ConflictingPseudoOverridesInSpecialization@F@foo#t0.1# +}; + +template +void functionSpecialization(); + +template +void functionSpecialization() { } +// CHECK: [[@LINE-1]]:6 | function/C | functionSpecialization | c:@FT@>3#T#T#NIfunctionSpecialization#v# | | Def | rel: 0 + +template<> +void functionSpecialization(); +// CHECK: [[@LINE-1]]:6 | function(Gen,TS)/C++ | functionSpecialization | c:@F@functionSpecialization<#I#I#VI0># | __Z22functionSpecializationIiiLi0EEvv | Decl,RelSpecialization | rel: 1 +// CHECK-NEXT: RelSpecialization | functionSpecialization | c:@FT@>3#T#T#NIfunctionSpecialization#v# + +template<> +void functionSpecialization() { } +// CHECK: [[@LINE-1]]:6 | function(Gen,TS)/C++ | functionSpecialization | c:@F@functionSpecialization<#I#I#VI0># | __Z22functionSpecializationIiiLi0EEvv | Def,RelSpecialization | rel: 1 +// CHECK-NEXT: RelSpecialization | functionSpecialization | c:@FT@>3#T#T#NIfunctionSpecialization#v# + +struct ContainsSpecializedMemberFunction { + template + void memberSpecialization(); +}; + +template +void ContainsSpecializedMemberFunction::memberSpecialization() { +// CHECK: 
[[@LINE-1]]:41 | instance-method/C++ | memberSpecialization | c:@S@ContainsSpecializedMemberFunction@FT@>1#TmemberSpecialization#v# | | Def,RelChild | rel: 1 +// CHECK-NEXT: RelChild +} + +template<> +void ContainsSpecializedMemberFunction::memberSpecialization() { +// CHECK: [[@LINE-1]]:41 | instance-method(Gen,TS)/C++ | memberSpecialization | c:@S@ContainsSpecializedMemberFunction@F@memberSpecialization<#I># | __ZN33ContainsSpecializedMemberFunction20memberSpecializationIiEEvv | Def,RelChild,RelSpecialization | rel: 2 +// CHECK-NEXT: RelChild +// CHECK-NEXT: RelSpecialization | memberSpecialization | c:@S@ContainsSpecializedMemberFunction@FT@>1#TmemberSpecialization#v# +} + +template +class SpecializationDecl; +// CHECK: [[@LINE-1]]:7 | class(Gen)/C++ | SpecializationDecl | c:@ST>1#T@SpecializationDecl | | Ref | rel: 0 + +template +class SpecializationDecl { }; +// CHECK: [[@LINE-1]]:7 | class(Gen)/C++ | SpecializationDecl | c:@ST>1#T@SpecializationDecl | | Def | rel: 0 + +template<> +class SpecializationDecl; +// CHECK: [[@LINE-1]]:7 | class(Gen)/C++ | SpecializationDecl | c:@ST>1#T@SpecializationDecl | | Ref | rel: 0 + +template<> +class SpecializationDecl { }; +// CHECK: [[@LINE-1]]:7 | class(Gen,TS)/C++ | SpecializationDecl | c:@S@SpecializationDecl>#I | | Def,RelSpecialization | rel: 1 +// CHECK-NEXT: RelSpecialization | SpecializationDecl | c:@ST>1#T@SpecializationDecl +// CHECK-NEXT: [[@LINE-3]]:7 | class(Gen)/C++ | SpecializationDecl | c:@ST>1#T@SpecializationDecl | | Ref | rel: 0 + +template +class PartialSpecilizationClass; +// CHECK: [[@LINE-1]]:7 | class(Gen)/C++ | PartialSpecilizationClass | c:@ST>2#T#T@PartialSpecilizationClass | | Ref | rel: 0 +// CHECK-NEXT: [[@LINE-2]]:33 | class/C++ | Cls | c:@S@Cls | | Ref | rel: 0 + +template<> +class PartialSpecilizationClass : Cls { }; +// CHECK: [[@LINE-1]]:7 | class(Gen,TS)/C++ | PartialSpecilizationClass | c:@S@PartialSpecilizationClass>#$@S@Cls#S0_ | | Def,RelSpecialization | rel: 1 +// CHECK-NEXT: 
RelSpecialization | PartialSpecilizationClass | c:@ST>2#T#T@PartialSpecilizationClass +// CHECK-NEXT: [[@LINE-3]]:45 | class/C++ | Cls | c:@S@Cls | | Ref,RelBase,RelCont | rel: 1 +// CHECK-NEXT: RelBase,RelCont | PartialSpecilizationClass | c:@S@PartialSpecilizationClass>#$@S@Cls#S0_ +// CHECK-NEXT: [[@LINE-5]]:7 | class(Gen)/C++ | PartialSpecilizationClass | c:@ST>2#T#T@PartialSpecilizationClass | | Ref | rel: 0 +// CHECK-NEXT: [[@LINE-6]]:33 | class/C++ | Cls | c:@S@Cls | | Ref | rel: 0 +// CHECK-NEXT: [[@LINE-7]]:38 | class/C++ | Cls | c:@S@Cls | | Ref | rel: 0 diff --git a/test/Index/Core/index-source.m b/test/Index/Core/index-source.m index f488914..a2eef89 100644 --- a/test/Index/Core/index-source.m +++ b/test/Index/Core/index-source.m @@ -353,7 +353,8 @@ typedef MyGenCls MyEnumerator; typedef NS_ENUM(AnotherEnum, int) { // CHECK-NOT: [[@LINE-1]]:17 | type-alias/C | AnotherEnum | -// CHECK: [[@LINE-2]]:17 | enum/C | AnotherEnum | [[AnotherEnum_USR:.*]] | {{.*}} | Ref,RelCont | rel: 1 +// CHECK-NOT: [[@LINE-2]]:17 | {{.*}} | Ref +// CHECK: [[@LINE-3]]:17 | enum/C | AnotherEnum | [[AnotherEnum_USR:.*]] | {{.*}} | Def | rel: 0 AnotherEnumFirst = 0, AnotherEnumSecond = 1, AnotherEnumThird = 2, diff --git a/test/Index/index-refs.cpp b/test/Index/index-refs.cpp index 5a1ba1e..d8bede0 100644 --- a/test/Index/index-refs.cpp +++ b/test/Index/index-refs.cpp @@ -105,6 +105,7 @@ int ginitlist[] = {EnumVal}; // CHECK: [indexDeclaration]: kind: c++-class-template | name: TS | {{.*}} | loc: 47:8 // CHECK-NEXT: [indexDeclaration]: kind: struct-template-partial-spec | name: TS | USR: c:@SP>1#T@TS>#t0.0#I | {{.*}} | loc: 50:8 // CHECK-NEXT: [indexDeclaration]: kind: typedef | name: MyInt | USR: c:index-refs.cpp@SP>1#T@TS>#t0.0#I@T@MyInt | {{.*}} | loc: 51:15 | semantic-container: [TS:50:8] | lexical-container: [TS:50:8] +// CHECK-NEXT: [indexEntityReference]: kind: c++-class-template | name: TS | USR: c:@ST>2#T#T@TS | lang: C++ | cursor: TemplateRef=TS:47:8 | loc: 50:8 | :: <> 
| container: [TU] | refkind: direct /* when indexing implicit instantiations [indexDeclaration]: kind: struct-template-spec | name: TS | USR: c:@S@TS>#I | {{.*}} | loc: 50:8 [indexDeclaration]: kind: typedef | name: MyInt | USR: c:index-refs.cpp@593@S@TS>#I@T@MyInt | {{.*}} | loc: 51:15 | semantic-container: [TS:50:8] | lexical-container: [TS:50:8] diff --git a/test/Index/print-type.cpp b/test/Index/print-type.cpp index 108ba53..ff150f7 100644 --- a/test/Index/print-type.cpp +++ b/test/Index/print-type.cpp @@ -71,6 +71,11 @@ struct Specialization; Specialization& > templRefParam; auto autoTemplRefParam = templRefParam; +template +struct DefaultedTypeExample {}; + +typedef DefaultedTypeExample DefaultedTypeAlias; + // RUN: c-index-test -test-print-type %s -std=c++14 | FileCheck %s // CHECK: Namespace=outer:1:11 (Definition) [type=] [typekind=Invalid] [isPOD=0] // CHECK: ClassTemplate=Foo:4:8 (Definition) [type=] [typekind=Invalid] [isPOD=0] @@ -115,7 +120,7 @@ auto autoTemplRefParam = templRefParam; // CHECK: TemplateRef=Baz:9:8 [type=] [typekind=Invalid] [isPOD=0] // CHECK: IntegerLiteral= [type=int] [typekind=Int] [isPOD=1] // CHECK: TemplateRef=Foo:4:8 [type=] [typekind=Invalid] [isPOD=0] -// CHECK: FieldDecl=qux:29:38 (Definition) [type=Qux, outer::inner::Bar::FooType>] [typekind=Unexposed] [templateargs/4= [type=int] [typekind=Int] [type=char *] [typekind=Pointer] [type=Foo] [typekind=Unexposed] [type=outer::inner::Bar::FooType] [typekind=Typedef]] [canonicaltype=outer::Qux, int>] [canonicaltypekind=Record] [canonicaltemplateargs/4= [type=int] [typekind=Int] [type=char *] [typekind=Pointer] [type=outer::Foo] [typekind=Record] [type=int] [typekind=Int]] [isPOD=1] +// CHECK: FieldDecl=qux:29:38 (Definition) [type=Qux, outer::inner::Bar::FooType>] [typekind=Unexposed] [templateargs/4= [type=int] [typekind=Int] [type=char *] [typekind=Pointer] [type=outer::Foo] [typekind=Record] [type=int] [typekind=Int]] [canonicaltype=outer::Qux, int>] [canonicaltypekind=Record] 
[canonicaltemplateargs/4= [type=int] [typekind=Int] [type=char *] [typekind=Pointer] [type=outer::Foo] [typekind=Record] [type=int] [typekind=Int]] [isPOD=1] // CHECK: TemplateRef=Qux:12:8 [type=] [typekind=Invalid] [isPOD=0] // CHECK: TemplateRef=Foo:4:8 [type=] [typekind=Invalid] [isPOD=0] // CHECK: FunctionTemplate=tbar:36:3 [type=T (int)] [typekind=FunctionProto] [canonicaltype=type-parameter-0-0 (int)] [canonicaltypekind=FunctionProto] [resulttype=T] [resulttypekind=Unexposed] [isPOD=0] @@ -177,3 +182,4 @@ auto autoTemplRefParam = templRefParam; // CHECK: VarDecl=autoTemplRefParam:72:6 (Definition) [type=Specialization &>] [typekind=Auto] [templateargs/1= [type=Specialization &] [typekind=LValueReference]] [canonicaltype=Specialization &>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=Specialization &] [typekind=LValueReference]] [isPOD=1] // CHECK: UnexposedExpr=templRefParam:71:40 [type=const Specialization &>] [typekind=Unexposed] const [templateargs/1= [type=Specialization &] [typekind=LValueReference]] [canonicaltype=const Specialization &>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=Specialization &] [typekind=LValueReference]] [isPOD=1] // CHECK: DeclRefExpr=templRefParam:71:40 [type=Specialization &>] [typekind=Unexposed] [templateargs/1= [type=Specialization &] [typekind=LValueReference]] [canonicaltype=Specialization &>] [canonicaltypekind=Record] [canonicaltemplateargs/1= [type=Specialization &] [typekind=LValueReference]] [isPOD=1] +// CHECK: TypedefDecl=DefaultedTypeAlias:77:35 (Definition) [type=DefaultedTypeAlias] [typekind=Typedef] [templateargs/2= [type=int] [typekind=Int] [type=int] [typekind=Int]] [canonicaltype=DefaultedTypeExample] [canonicaltypekind=Record] [canonicaltemplateargs/2= [type=int] [typekind=Int] [type=int] [typekind=Int]] [isPOD=0] diff --git a/test/Modules/Inputs/objc-desig-init/A.h b/test/Modules/Inputs/objc-desig-init/A.h new file mode 100644 index 0000000..02e0105 --- /dev/null +++ 
b/test/Modules/Inputs/objc-desig-init/A.h @@ -0,0 +1 @@ +#import "A2.h" diff --git a/test/Modules/Inputs/objc-desig-init/A2.h b/test/Modules/Inputs/objc-desig-init/A2.h new file mode 100644 index 0000000..5a4ceb4 --- /dev/null +++ b/test/Modules/Inputs/objc-desig-init/A2.h @@ -0,0 +1,4 @@ +#import "Base.h" + +@interface A2 : Base +@end diff --git a/test/Modules/Inputs/objc-desig-init/Base.h b/test/Modules/Inputs/objc-desig-init/Base.h new file mode 100644 index 0000000..4718425 --- /dev/null +++ b/test/Modules/Inputs/objc-desig-init/Base.h @@ -0,0 +1,4 @@ +@class NSString; +@interface Base +- (id)initWithNibName:(NSString *)nibNameOrNil __attribute__((objc_designated_initializer)); +@end diff --git a/test/Modules/Inputs/objc-desig-init/X.h b/test/Modules/Inputs/objc-desig-init/X.h new file mode 100644 index 0000000..cb46ba7 --- /dev/null +++ b/test/Modules/Inputs/objc-desig-init/X.h @@ -0,0 +1,4 @@ +#import "A2.h" + +@interface X : A2 +@end diff --git a/test/Modules/Inputs/objc-desig-init/module.modulemap b/test/Modules/Inputs/objc-desig-init/module.modulemap new file mode 100644 index 0000000..0150efb --- /dev/null +++ b/test/Modules/Inputs/objc-desig-init/module.modulemap @@ -0,0 +1,9 @@ +module Base { + header "Base.h" + export * +} + +module A { + header "A.h" + export * +} diff --git a/test/Modules/Inputs/template-default-args/a.h b/test/Modules/Inputs/template-default-args/a.h index 532cbc8..1541d5f 100644 --- a/test/Modules/Inputs/template-default-args/a.h +++ b/test/Modules/Inputs/template-default-args/a.h @@ -14,3 +14,16 @@ struct FriendL { template friend struct L; }; END + +namespace DeferredLookup { + template using X = U; + template void f() { (void) X(); } + template int n = X(); + template struct S { X xt; enum E : int; }; + template enum S::E : int { a = X() }; + + namespace Indirect { + template struct A {}; + template struct B { template using C = A; }; + } +} diff --git a/test/Modules/Inputs/template-default-args/d.h 
b/test/Modules/Inputs/template-default-args/d.h index 5961c91..07ececc 100644 --- a/test/Modules/Inputs/template-default-args/d.h +++ b/test/Modules/Inputs/template-default-args/d.h @@ -4,3 +4,10 @@ struct FriendL { template friend struct L; }; END + +namespace DeferredLookup { + namespace Indirect { + template struct A {}; + template struct B { template using C = A; }; + } +} diff --git a/test/Modules/localsubmodulevis.m b/test/Modules/localsubmodulevis.m index f8c4b3c..bf31765 100644 --- a/test/Modules/localsubmodulevis.m +++ b/test/Modules/localsubmodulevis.m @@ -1,4 +1,4 @@ -// RUN: rm -rf %t +// RUN: rm -rf %t.a %t.b // RUN: %clang_cc1 -fmodules -fmodules-local-submodule-visibility -fimplicit-module-maps -fmodules-cache-path=%t.a -I %S/Inputs/preprocess -verify %s // RUN: %clang_cc1 -fmodules -fmodules-local-submodule-visibility -fimplicit-module-maps -fmodules-cache-path=%t.b -I %S/Inputs/preprocess -x c -verify -x c %s diff --git a/test/Modules/objc-designated-init-mod.m b/test/Modules/objc-designated-init-mod.m new file mode 100644 index 0000000..15c25a3 --- /dev/null +++ b/test/Modules/objc-designated-init-mod.m @@ -0,0 +1,17 @@ +// RUN: rm -rf %t +// RUN: %clang_cc1 -fmodules-cache-path=%t -fmodules -fimplicit-module-maps -I %S/Inputs/objc-desig-init %s -verify +// expected-no-diagnostics + +#import "X.h" +#import "Base.h" +#import "A.h" + +@implementation X + +- (instancetype)initWithNibName:(NSString *)nibName { + if ((self = [super initWithNibName:nibName])) { + return self; + } + return self; +} +@end diff --git a/test/Modules/template-default-args.cpp b/test/Modules/template-default-args.cpp index 9d16cbf..c51cb28 100644 --- a/test/Modules/template-default-args.cpp +++ b/test/Modules/template-default-args.cpp @@ -44,3 +44,20 @@ H<> h; // expected-error {{default argument of 'H' must be imported from module I<> i; L<> *l; END + +namespace DeferredLookup { + template using X = U; + template void f() { (void) X(); } + template int n = X(); // 
expected-warning {{extension}} + template struct S { X xt; enum E : int; }; + template enum S::E : int { a = X() }; + + void test() { + f(); + n = 1; + S s; + S::E e = S::E::a; + + Indirect::B::C indirect; + } +} diff --git a/test/OpenMP/capturing_in_templates.cpp b/test/OpenMP/capturing_in_templates.cpp new file mode 100644 index 0000000..a4dcbee --- /dev/null +++ b/test/OpenMP/capturing_in_templates.cpp @@ -0,0 +1,30 @@ +// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-ibm-linux-gnu -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s +// expected-no-diagnostics + +template +struct pair { + T1 t1; + T2 t2; + pair(T1 t1, T2 t2) : t1(t1), t2(t2) {} +}; + +template +pair make_pair(T1 &&t1, T2 &&t2) { + return {t1, t2}; +} + +// CHECK-LABEL: @main +int main(int argc, char **argv) { +// CHECK: call i32 @__tgt_target(i32 -1, i8* @{{.+}}.region_id, i32 0, i8** null, i8** null, i64* null, i32* null) +#pragma omp target + { + for (int i = 0; i < 64; ++i) { + for (int j = 0; j < 64; ++j) { + auto foo = make_pair(i * i, j * j); + } + } + } + return 0; +} + +// CHECK: call {{.+}} @{{.*}}make_pair diff --git a/test/OpenMP/distribute_parallel_for_codegen.cpp b/test/OpenMP/distribute_parallel_for_codegen.cpp new file mode 100644 index 0000000..62c38f1 --- /dev/null +++ b/test/OpenMP/distribute_parallel_for_codegen.cpp @@ -0,0 +1,2260 @@ +// Test host code gen +// RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-64 +// RUN: %clang_cc1 -DLAMBDA -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s +// RUN: %clang_cc1 -DLAMBDA -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu 
-std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-64 +// RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-32 +// RUN: %clang_cc1 -DLAMBDA -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s +// RUN: %clang_cc1 -DLAMBDA -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-32 + +// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64 +// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s +// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64 +// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32 +// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s +// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK 
--check-prefix CHECK-32 +// expected-no-diagnostics +#ifndef HEADER +#define HEADER + + +template +T tmain() { + T *a, *b, *c; + int n = 10000; + int ch = 100; + + // no schedule clauses + #pragma omp target + #pragma omp teams + #pragma omp distribute parallel for + for (int i = 0; i < n; ++i) { + a[i] = b[i] + c[i]; + } + + // dist_schedule: static no chunk + #pragma omp target + #pragma omp teams + #pragma omp distribute parallel for dist_schedule(static) + for (int i = 0; i < n; ++i) { + a[i] = b[i] + c[i]; + } + + // dist_schedule: static chunk + #pragma omp target + #pragma omp teams + #pragma omp distribute parallel for dist_schedule(static, ch) + for (int i = 0; i < n; ++i) { + a[i] = b[i] + c[i]; + } + + // schedule: static no chunk + #pragma omp target + #pragma omp teams + #pragma omp distribute parallel for schedule(static) + for (int i = 0; i < n; ++i) { + a[i] = b[i] + c[i]; + } + + // schedule: static chunk + #pragma omp target + #pragma omp teams + #pragma omp distribute parallel for schedule(static, ch) + for (int i = 0; i < n; ++i) { + a[i] = b[i] + c[i]; + } + + // schedule: dynamic no chunk + #pragma omp target + #pragma omp teams + #pragma omp distribute parallel for schedule(dynamic) + for (int i = 0; i < n; ++i) { + a[i] = b[i] + c[i]; + } + + // schedule: dynamic chunk + #pragma omp target + #pragma omp teams + #pragma omp distribute parallel for schedule(dynamic, ch) + for (int i = 0; i < n; ++i) { + a[i] = b[i] + c[i]; + } + + return T(); +} + +int main() { + double *a, *b, *c; + int n = 10000; + int ch = 100; + +#ifdef LAMBDA + // LAMBDA-LABEL: @main + // LAMBDA: call{{.*}} void [[OUTER_LAMBDA:@.+]]( + [&]() { + // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]]( + + // LAMBDA: call i{{[0-9]+}} @__tgt_target_teams( + // LAMBDA: call void [[OFFLOADING_FUN_1:@.+]]( + + // LAMBDA: call i{{[0-9]+}} @__tgt_target_teams( + // LAMBDA: call void [[OFFLOADING_FUN_2:@.+]]( + + // LAMBDA: call i{{[0-9]+}} @__tgt_target_teams( + // LAMBDA: 
call void [[OFFLOADING_FUN_3:@.+]]( + + // LAMBDA: call i{{[0-9]+}} @__tgt_target_teams( + // LAMBDA: call void [[OFFLOADING_FUN_4:@.+]]( + + // LAMBDA: call i{{[0-9]+}} @__tgt_target_teams( + // LAMBDA: call void [[OFFLOADING_FUN_5:@.+]]( + + // LAMBDA: call i{{[0-9]+}} @__tgt_target_teams( + // LAMBDA: call void [[OFFLOADING_FUN_6:@.+]]( + + // LAMBDA: call i{{[0-9]+}} @__tgt_target_teams( + // LAMBDA: call void [[OFFLOADING_FUN_7:@.+]]( + + // no schedule clauses + #pragma omp target + #pragma omp teams + // LAMBDA: define{{.+}} void [[OFFLOADING_FUN_1]]( + // LAMBDA: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 4, {{.+}}* [[OMP_OUTLINED_1:@.+]] to {{.+}}) + + #pragma omp distribute parallel for + for (int i = 0; i < n; ++i) { + a[i] = b[i] + c[i]; + // LAMBDA: define{{.+}} void [[OMP_OUTLINED_1]]( + // LAMBDA-DAG: [[OMP_IV:%.omp.iv]] = alloca + // LAMBDA-DAG: [[OMP_LB:%.omp.comb.lb]] = alloca + // LAMBDA-DAG: [[OMP_UB:%.omp.comb.ub]] = alloca + // LAMBDA-DAG: [[OMP_ST:%.omp.stride]] = alloca + + // LAMBDA: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, i32 92, + + // check EUB for distribute + // LAMBDA-DAG: [[OMP_UB_VAL_1:%.+]] = load{{.+}} [[OMP_UB]], + // LAMBDA: [[NUM_IT_1:%.+]] = load{{.+}}, + // LAMBDA-DAG: [[CMP_UB_NUM_IT:%.+]] = icmp sgt {{.+}} [[OMP_UB_VAL_1]], [[NUM_IT_1]] + // LAMBDA: br {{.+}} [[CMP_UB_NUM_IT]], label %[[EUB_TRUE:.+]], label %[[EUB_FALSE:.+]] + // LAMBDA-DAG: [[EUB_TRUE]]: + // LAMBDA: [[NUM_IT_2:%.+]] = load{{.+}}, + // LAMBDA: br label %[[EUB_END:.+]] + // LAMBDA-DAG: [[EUB_FALSE]]: + // LAMBDA: [[OMP_UB_VAL2:%.+]] = load{{.+}} [[OMP_UB]], + // LAMBDA: br label %[[EUB_END]] + // LAMBDA-DAG: [[EUB_END]]: + // LAMBDA-DAG: [[EUB_RES:%.+]] = phi{{.+}} [ [[NUM_IT_2]], %[[EUB_TRUE]] ], [ [[OMP_UB_VAL2]], %[[EUB_FALSE]] ] + // LAMBDA: store{{.+}} [[EUB_RES]], {{.+}}* [[OMP_UB]], + + // initialize omp.iv + // LAMBDA: [[OMP_LB_VAL_1:%.+]] = load{{.+}}, {{.+}}* [[OMP_LB]], + // LAMBDA: store {{.+}} [[OMP_LB_VAL_1]], {{.+}}* 
[[OMP_IV]], + // LAMBDA: br label %[[OMP_JUMP_BACK:.+]] + + // check exit condition + // LAMBDA: [[OMP_JUMP_BACK]]: + // LAMBDA-DAG: [[OMP_IV_VAL_1:%.+]] = load {{.+}} [[OMP_IV]], + // LAMBDA-DAG: [[OMP_UB_VAL_3:%.+]] = load {{.+}} [[OMP_UB]], + // LAMBDA: [[CMP_IV_UB:%.+]] = icmp sle {{.+}} [[OMP_IV_VAL_1]], [[OMP_UB_VAL_3]] + // LAMBDA: br {{.+}} [[CMP_IV_UB]], label %[[DIST_BODY:.+]], label %[[DIST_END:.+]] + + // check that PrevLB and PrevUB are passed to the 'for' + // LAMBDA: [[DIST_BODY]]: + // LAMBDA-DAG: [[OMP_PREV_LB:%.+]] = load {{.+}}, {{.+}} [[OMP_LB]], + // LAMBDA-64-DAG: [[OMP_PREV_LB_EXT:%.+]] = zext {{.+}} [[OMP_PREV_LB]] to + // LAMBDA-DAG: [[OMP_PREV_UB:%.+]] = load {{.+}}, {{.+}} [[OMP_UB]], + // LAMBDA-64-DAG: [[OMP_PREV_UB_EXT:%.+]] = zext {{.+}} [[OMP_PREV_UB]] to + // check that distlb and distub are properly passed to fork_call + // LAMBDA-32: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_1:@.+]] to {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB]], i{{[0-9]+}} [[OMP_PREV_UB]], {{.+}}) + // LAMBDA-64: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_1:@.+]] to {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_EXT]], i{{[0-9]+}} [[OMP_PREV_UB_EXT]], {{.+}}) + // LAMBDA: br label %[[DIST_INC:.+]] + + // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch + // LAMBDA: [[DIST_INC]]: + // LAMBDA-DAG: [[OMP_IV_VAL_2:%.+]] = load {{.+}}, {{.+}}* [[OMP_IV]], + // LAMBDA-DAG: [[OMP_ST_VAL_1:%.+]] = load {{.+}}, {{.+}}* [[OMP_ST]], + // LAMBDA: [[OMP_IV_INC:%.+]] = add{{.+}} [[OMP_IV_VAL_2]], [[OMP_ST_VAL_1]] + // LAMBDA: store{{.+}} [[OMP_IV_INC]], {{.+}}* [[OMP_IV]], + // LAMBDA: br label %[[OMP_JUMP_BACK]] + + // LAMBDA-DAG: call void @__kmpc_for_static_fini( + // LAMBDA: ret + + // implementation of 'parallel for' + // LAMBDA: define{{.+}} void [[OMP_PARFOR_OUTLINED_1]]({{.+}}, {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_IN:%.+]], i{{[0-9]+}} [[OMP_PREV_UB_IN:%.+]], {{.+}}, {{.+}}, {{.+}}, {{.+}}) + + // 
LAMBDA-DAG: [[OMP_PF_LB:%.omp.lb]] = alloca{{.+}}, + // LAMBDA-DAG: [[OMP_PF_UB:%.omp.ub]] = alloca{{.+}}, + // LAMBDA-DAG: [[OMP_PF_IV:%.omp.iv]] = alloca{{.+}}, + + // initialize lb and ub to PrevLB and PrevUB + // LAMBDA-DAG: store{{.+}} [[OMP_PREV_LB_IN]], {{.+}}* [[PREV_LB_ADDR:%.+]], + // LAMBDA-DAG: store{{.+}} [[OMP_PREV_UB_IN]], {{.+}}* [[PREV_UB_ADDR:%.+]], + // LAMBDA-DAG: [[OMP_PREV_LB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_LB_ADDR]], + // LAMBDA-64-DAG: [[OMP_PREV_LB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_LB_VAL]] to {{.+}} + // LAMBDA-DAG: [[OMP_PREV_UB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], + // LAMBDA-64-DAG: [[OMP_PREV_UB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_UB_VAL]] to {{.+}} + // LAMBDA-64-DAG: store{{.+}} [[OMP_PREV_LB_TRC]], {{.+}}* [[OMP_PF_LB]], + // LAMBDA-32-DAG: store{{.+}} [[OMP_PREV_LB_VAL]], {{.+}}* [[OMP_PF_LB]], + // LAMBDA-64-DAG: store{{.+}} [[OMP_PREV_UB_TRC]], {{.+}}* [[OMP_PF_UB]], + // LAMBDA-32-DAG: store{{.+}} [[OMP_PREV_UB_VAL]], {{.+}}* [[OMP_PF_UB]], + // LAMBDA: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, {{.+}} 34, {{.+}}, {{.+}}* [[OMP_PF_LB]], {{.+}}* [[OMP_PF_UB]],{{.+}}) + + // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used + // In this case we use EUB + // LAMBDA-DAG: [[OMP_PF_UB_VAL_1:%.+]] = load{{.+}} [[OMP_PF_UB]], + // LAMBDA: [[PF_NUM_IT_1:%.+]] = load{{.+}}, + // LAMBDA-DAG: [[PF_CMP_UB_NUM_IT:%.+]] = icmp{{.+}} [[OMP_PF_UB_VAL_1]], [[PF_NUM_IT_1]] + // LAMBDA: br i1 [[PF_CMP_UB_NUM_IT]], label %[[PF_EUB_TRUE:.+]], label %[[PF_EUB_FALSE:.+]] + // LAMBDA: [[PF_EUB_TRUE]]: + // LAMBDA: [[PF_NUM_IT_2:%.+]] = load{{.+}}, + // LAMBDA: br label %[[PF_EUB_END:.+]] + // LAMBDA-DAG: [[PF_EUB_FALSE]]: + // LAMBDA: [[OMP_PF_UB_VAL2:%.+]] = load{{.+}} [[OMP_PF_UB]], + // LAMBDA: br label %[[PF_EUB_END]] + // LAMBDA-DAG: [[PF_EUB_END]]: + // LAMBDA-DAG: [[PF_EUB_RES:%.+]] = phi{{.+}} [ [[PF_NUM_IT_2]], %[[PF_EUB_TRUE]] ], [ [[OMP_PF_UB_VAL2]], %[[PF_EUB_FALSE]] ] + // LAMBDA: 
store{{.+}} [[PF_EUB_RES]],{{.+}} [[OMP_PF_UB]], + + // initialize omp.iv + // LAMBDA: [[OMP_PF_LB_VAL_1:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], + // LAMBDA: store {{.+}} [[OMP_PF_LB_VAL_1]], {{.+}}* [[OMP_PF_IV]], + // LAMBDA: br label %[[OMP_PF_JUMP_BACK:.+]] + + // check exit condition + // LAMBDA: [[OMP_PF_JUMP_BACK]]: + // LAMBDA-DAG: [[OMP_PF_IV_VAL_1:%.+]] = load {{.+}} [[OMP_PF_IV]], + // LAMBDA-DAG: [[OMP_PF_UB_VAL_3:%.+]] = load {{.+}} [[OMP_PF_UB]], + // LAMBDA: [[PF_CMP_IV_UB:%.+]] = icmp sle {{.+}} [[OMP_PF_IV_VAL_1]], [[OMP_PF_UB_VAL_3]] + // LAMBDA: br {{.+}} [[PF_CMP_IV_UB]], label %[[PF_BODY:.+]], label %[[PF_END:.+]] + + // check that PrevLB and PrevUB are passed to the 'for' + // LAMBDA: [[PF_BODY]]: + // LAMBDA-DAG: {{.+}} = load{{.+}}, {{.+}}* [[OMP_PF_IV]], + // LAMBDA: br label {{.+}} + + // check stride 1 for 'for' in 'distribute parallel for' + // LAMBDA-DAG: [[OMP_PF_IV_VAL_2:%.+]] = load {{.+}}, {{.+}}* [[OMP_PF_IV]], + // LAMBDA: [[OMP_PF_IV_INC:%.+]] = add{{.+}} [[OMP_PF_IV_VAL_2]], 1 + // LAMBDA: store{{.+}} [[OMP_PF_IV_INC]], {{.+}}* [[OMP_PF_IV]], + // LAMBDA: br label %[[OMP_PF_JUMP_BACK]] + + // LAMBDA-DAG: call void @__kmpc_for_static_fini( + // LAMBDA: ret + + [&]() { + a[i] = b[i] + c[i]; + }(); + } + + // dist_schedule: static no chunk (same sa default - no dist_schedule) + #pragma omp target + #pragma omp teams + // LAMBDA: define{{.+}} void [[OFFLOADING_FUN_2]]( + // LAMBDA: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 4, {{.+}}* [[OMP_OUTLINED_2:@.+]] to {{.+}}) + + #pragma omp distribute parallel for dist_schedule(static) + for (int i = 0; i < n; ++i) { + a[i] = b[i] + c[i]; + // LAMBDA: define{{.+}} void [[OMP_OUTLINED_2]]( + // LAMBDA-DAG: [[OMP_IV:%.omp.iv]] = alloca + // LAMBDA-DAG: [[OMP_LB:%.omp.comb.lb]] = alloca + // LAMBDA-DAG: [[OMP_UB:%.omp.comb.ub]] = alloca + // LAMBDA-DAG: [[OMP_ST:%.omp.stride]] = alloca + + // LAMBDA: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, i32 92, + + // check EUB for 
distribute + // LAMBDA-DAG: [[OMP_UB_VAL_1:%.+]] = load{{.+}} [[OMP_UB]], + // LAMBDA: [[NUM_IT_1:%.+]] = load{{.+}}, + // LAMBDA-DAG: [[CMP_UB_NUM_IT:%.+]] = icmp sgt {{.+}} [[OMP_UB_VAL_1]], [[NUM_IT_1]] + // LAMBDA: br {{.+}} [[CMP_UB_NUM_IT]], label %[[EUB_TRUE:.+]], label %[[EUB_FALSE:.+]] + // LAMBDA-DAG: [[EUB_TRUE]]: + // LAMBDA: [[NUM_IT_2:%.+]] = load{{.+}}, + // LAMBDA: br label %[[EUB_END:.+]] + // LAMBDA-DAG: [[EUB_FALSE]]: + // LAMBDA: [[OMP_UB_VAL2:%.+]] = load{{.+}} [[OMP_UB]], + // LAMBDA: br label %[[EUB_END]] + // LAMBDA-DAG: [[EUB_END]]: + // LAMBDA-DAG: [[EUB_RES:%.+]] = phi{{.+}} [ [[NUM_IT_2]], %[[EUB_TRUE]] ], [ [[OMP_UB_VAL2]], %[[EUB_FALSE]] ] + // LAMBDA: store{{.+}} [[EUB_RES]], {{.+}}* [[OMP_UB]], + + // initialize omp.iv + // LAMBDA: [[OMP_LB_VAL_1:%.+]] = load{{.+}}, {{.+}}* [[OMP_LB]], + // LAMBDA: store {{.+}} [[OMP_LB_VAL_1]], {{.+}}* [[OMP_IV]], + // LAMBDA: br label %[[OMP_JUMP_BACK:.+]] + + // check exit condition + // LAMBDA: [[OMP_JUMP_BACK]]: + // LAMBDA-DAG: [[OMP_IV_VAL_1:%.+]] = load {{.+}} [[OMP_IV]], + // LAMBDA-DAG: [[OMP_UB_VAL_3:%.+]] = load {{.+}} [[OMP_UB]], + // LAMBDA: [[CMP_IV_UB:%.+]] = icmp sle {{.+}} [[OMP_IV_VAL_1]], [[OMP_UB_VAL_3]] + // LAMBDA: br {{.+}} [[CMP_IV_UB]], label %[[DIST_BODY:.+]], label %[[DIST_END:.+]] + + // check that PrevLB and PrevUB are passed to the 'for' + // LAMBDA: [[DIST_BODY]]: + // LAMBDA-DAG: [[OMP_PREV_LB:%.+]] = load {{.+}}, {{.+}} [[OMP_LB]], + // LAMBDA-64-DAG: [[OMP_PREV_LB_EXT:%.+]] = zext {{.+}} [[OMP_PREV_LB]] to + // LAMBDA-DAG: [[OMP_PREV_UB:%.+]] = load {{.+}}, {{.+}} [[OMP_UB]], + // LAMBDA-64-DAG: [[OMP_PREV_UB_EXT:%.+]] = zext {{.+}} [[OMP_PREV_UB]] to + // check that distlb and distub are properly passed to fork_call + // LAMBDA-64: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_2:@.+]] to {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_EXT]], i{{[0-9]+}} [[OMP_PREV_UB_EXT]], {{.+}}) + // LAMBDA-32: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, 
{{.+}}[[OMP_PARFOR_OUTLINED_2:@.+]] to {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB]], i{{[0-9]+}} [[OMP_PREV_UB]], {{.+}}) + // LAMBDA: br label %[[DIST_INC:.+]] + + // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch + // LAMBDA: [[DIST_INC]]: + // LAMBDA-DAG: [[OMP_IV_VAL_2:%.+]] = load {{.+}}, {{.+}}* [[OMP_IV]], + // LAMBDA-DAG: [[OMP_ST_VAL_1:%.+]] = load {{.+}}, {{.+}}* [[OMP_ST]], + // LAMBDA: [[OMP_IV_INC:%.+]] = add{{.+}} [[OMP_IV_VAL_2]], [[OMP_ST_VAL_1]] + // LAMBDA: store{{.+}} [[OMP_IV_INC]], {{.+}}* [[OMP_IV]], + // LAMBDA: br label %[[OMP_JUMP_BACK]] + + // LAMBDA-DAG: call void @__kmpc_for_static_fini( + // LAMBDA: ret + + // implementation of 'parallel for' + // LAMBDA: define{{.+}} void [[OMP_PARFOR_OUTLINED_2]]({{.+}}, {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_IN:%.+]], i{{[0-9]+}} [[OMP_PREV_UB_IN:%.+]], {{.+}}, {{.+}}, {{.+}}, {{.+}}) + + // LAMBDA-DAG: [[OMP_PF_LB:%.omp.lb]] = alloca{{.+}}, + // LAMBDA-DAG: [[OMP_PF_UB:%.omp.ub]] = alloca{{.+}}, + // LAMBDA-DAG: [[OMP_PF_IV:%.omp.iv]] = alloca{{.+}}, + + // initialize lb and ub to PrevLB and PrevUB + // LAMBDA-DAG: store{{.+}} [[OMP_PREV_LB_IN]], {{.+}}* [[PREV_LB_ADDR:%.+]], + // LAMBDA-DAG: store{{.+}} [[OMP_PREV_UB_IN]], {{.+}}* [[PREV_UB_ADDR:%.+]], + // LAMBDA-DAG: [[OMP_PREV_LB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_LB_ADDR]], + // LAMBDA-64-DAG: [[OMP_PREV_LB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_LB_VAL]] to {{.+}} + // LAMBDA-DAG: [[OMP_PREV_UB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], + // LAMBDA-64-DAG: [[OMP_PREV_UB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_UB_VAL]] to {{.+}} + // LAMBDA-64-DAG: store{{.+}} [[OMP_PREV_LB_TRC]], {{.+}}* [[OMP_PF_LB]], + // LAMBDA-32-DAG: store{{.+}} [[OMP_PREV_LB_VAL]], {{.+}}* [[OMP_PF_LB]], + // LAMBDA-64-DAG: store{{.+}} [[OMP_PREV_UB_TRC]], {{.+}}* [[OMP_PF_UB]], + // LAMBDA-32-DAG: store{{.+}} [[OMP_PREV_UB_VAL]], {{.+}}* [[OMP_PF_UB]], + // LAMBDA: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, {{.+}} 34, {{.+}}, {{.+}}* 
[[OMP_PF_LB]], {{.+}}* [[OMP_PF_UB]],{{.+}}) + + // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used + // In this case we use EUB + // LAMBDA-DAG: [[OMP_PF_UB_VAL_1:%.+]] = load{{.+}} [[OMP_PF_UB]], + // LAMBDA: [[PF_NUM_IT_1:%.+]] = load{{.+}}, + // LAMBDA-DAG: [[PF_CMP_UB_NUM_IT:%.+]] = icmp{{.+}} [[OMP_PF_UB_VAL_1]], [[PF_NUM_IT_1]] + // LAMBDA: br i1 [[PF_CMP_UB_NUM_IT]], label %[[PF_EUB_TRUE:.+]], label %[[PF_EUB_FALSE:.+]] + // LAMBDA: [[PF_EUB_TRUE]]: + // LAMBDA: [[PF_NUM_IT_2:%.+]] = load{{.+}}, + // LAMBDA: br label %[[PF_EUB_END:.+]] + // LAMBDA-DAG: [[PF_EUB_FALSE]]: + // LAMBDA: [[OMP_PF_UB_VAL2:%.+]] = load{{.+}} [[OMP_PF_UB]], + // LAMBDA: br label %[[PF_EUB_END]] + // LAMBDA-DAG: [[PF_EUB_END]]: + // LAMBDA-DAG: [[PF_EUB_RES:%.+]] = phi{{.+}} [ [[PF_NUM_IT_2]], %[[PF_EUB_TRUE]] ], [ [[OMP_PF_UB_VAL2]], %[[PF_EUB_FALSE]] ] + // LAMBDA: store{{.+}} [[PF_EUB_RES]],{{.+}} [[OMP_PF_UB]], + + // initialize omp.iv + // LAMBDA: [[OMP_PF_LB_VAL_1:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], + // LAMBDA: store {{.+}} [[OMP_PF_LB_VAL_1]], {{.+}}* [[OMP_PF_IV]], + // LAMBDA: br label %[[OMP_PF_JUMP_BACK:.+]] + + // check exit condition + // LAMBDA: [[OMP_PF_JUMP_BACK]]: + // LAMBDA-DAG: [[OMP_PF_IV_VAL_1:%.+]] = load {{.+}} [[OMP_PF_IV]], + // LAMBDA-DAG: [[OMP_PF_UB_VAL_3:%.+]] = load {{.+}} [[OMP_PF_UB]], + // LAMBDA: [[PF_CMP_IV_UB:%.+]] = icmp sle {{.+}} [[OMP_PF_IV_VAL_1]], [[OMP_PF_UB_VAL_3]] + // LAMBDA: br {{.+}} [[PF_CMP_IV_UB]], label %[[PF_BODY:.+]], label %[[PF_END:.+]] + + // check that PrevLB and PrevUB are passed to the 'for' + // LAMBDA: [[PF_BODY]]: + // LAMBDA-DAG: {{.+}} = load{{.+}}, {{.+}}* [[OMP_PF_IV]], + // LAMBDA: br label {{.+}} + + // check stride 1 for 'for' in 'distribute parallel for' + // LAMBDA-DAG: [[OMP_PF_IV_VAL_2:%.+]] = load {{.+}}, {{.+}}* [[OMP_PF_IV]], + // LAMBDA: [[OMP_PF_IV_INC:%.+]] = add{{.+}} [[OMP_PF_IV_VAL_2]], 1 + // LAMBDA: store{{.+}} [[OMP_PF_IV_INC]], {{.+}}* [[OMP_PF_IV]], + // 
LAMBDA: br label %[[OMP_PF_JUMP_BACK]] + + // LAMBDA-DAG: call void @__kmpc_for_static_fini( + // LAMBDA: ret + [&]() { + a[i] = b[i] + c[i]; + }(); + } + + // dist_schedule: static chunk + #pragma omp target + #pragma omp teams + // LAMBDA: define{{.+}} void [[OFFLOADING_FUN_3]]( + // LAMBDA: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 5, {{.+}}* [[OMP_OUTLINED_3:@.+]] to {{.+}}) + + #pragma omp distribute parallel for dist_schedule(static, ch) + for (int i = 0; i < n; ++i) { + a[i] = b[i] + c[i]; + // LAMBDA: define{{.+}} void [[OMP_OUTLINED_3]]( + // LAMBDA-DAG: [[OMP_IV:%.omp.iv]] = alloca + // LAMBDA-DAG: [[OMP_LB:%.omp.comb.lb]] = alloca + // LAMBDA-DAG: [[OMP_UB:%.omp.comb.ub]] = alloca + // LAMBDA-DAG: [[OMP_ST:%.omp.stride]] = alloca + + // unlike the previous tests, in this one we have a outer and inner loop for 'distribute' + // LAMBDA: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, i32 91, + // LAMBDA: br label %[[DIST_OUTER_LOOP_HEADER:.+]] + + // LAMBDA: [[DIST_OUTER_LOOP_HEADER]]: + // check EUB for distribute + // LAMBDA-DAG: [[OMP_UB_VAL_1:%.+]] = load{{.+}} [[OMP_UB]], + // LAMBDA: [[NUM_IT_1:%.+]] = load{{.+}}, + // LAMBDA-DAG: [[CMP_UB_NUM_IT:%.+]] = icmp sgt {{.+}} [[OMP_UB_VAL_1]], [[NUM_IT_1]] + // LAMBDA: br {{.+}} [[CMP_UB_NUM_IT]], label %[[EUB_TRUE:.+]], label %[[EUB_FALSE:.+]] + // LAMBDA-DAG: [[EUB_TRUE]]: + // LAMBDA: [[NUM_IT_2:%.+]] = load{{.+}}, + // LAMBDA: br label %[[EUB_END:.+]] + // LAMBDA-DAG: [[EUB_FALSE]]: + // LAMBDA: [[OMP_UB_VAL2:%.+]] = load{{.+}} [[OMP_UB]], + // LAMBDA: br label %[[EUB_END]] + // LAMBDA-DAG: [[EUB_END]]: + // LAMBDA-DAG: [[EUB_RES:%.+]] = phi{{.+}} [ [[NUM_IT_2]], %[[EUB_TRUE]] ], [ [[OMP_UB_VAL2]], %[[EUB_FALSE]] ] + // LAMBDA: store{{.+}} [[EUB_RES]], {{.+}}* [[OMP_UB]], + + // initialize omp.iv + // LAMBDA: [[OMP_LB_VAL_1:%.+]] = load{{.+}}, {{.+}}* [[OMP_LB]], + // LAMBDA: store {{.+}} [[OMP_LB_VAL_1]], {{.+}}* [[OMP_IV]], + + // check exit condition + // LAMBDA-DAG: 
[[OMP_IV_VAL_1:%.+]] = load {{.+}} [[OMP_IV]], + // LAMBDA-DAG: [[OMP_UB_VAL_3:%.+]] = load {{.+}} [[OMP_UB]], + // LAMBDA: [[CMP_IV_UB:%.+]] = icmp sle {{.+}} [[OMP_IV_VAL_1]], [[OMP_UB_VAL_3]] + // LAMBDA: br {{.+}} [[CMP_IV_UB]], label %[[DIST_OUTER_LOOP_BODY:.+]], label %[[DIST_OUTER_LOOP_END:.+]] + + // LAMBDA: [[DIST_OUTER_LOOP_BODY]]: + // LAMBDA: br label %[[DIST_INNER_LOOP_HEADER:.+]] + + // LAMBDA: [[DIST_INNER_LOOP_HEADER]]: + // LAMBDA-DAG: [[OMP_IV_VAL_2:%.+]] = load {{.+}} [[OMP_IV]], + // LAMBDA-DAG: [[OMP_UB_VAL_4:%.+]] = load {{.+}} [[OMP_UB]], + // LAMBDA: [[CMP_IV_UB_2:%.+]] = icmp sle {{.+}} [[OMP_IV_VAL_2]], [[OMP_UB_VAL_4]] + // LAMBDA: br{{.+}} [[CMP_IV_UB_2]], label %[[DIST_INNER_LOOP_BODY:.+]], label %[[DIST_INNER_LOOP_END:.+]] + + // check that PrevLB and PrevUB are passed to the 'for' + // LAMBDA: [[DIST_INNER_LOOP_BODY]]: + // LAMBDA-DAG: [[OMP_PREV_LB:%.+]] = load {{.+}}, {{.+}} [[OMP_LB]], + // LAMBDA-64-DAG: [[OMP_PREV_LB_EXT:%.+]] = zext {{.+}} [[OMP_PREV_LB]] to {{.+}} + // LAMBDA-DAG: [[OMP_PREV_UB:%.+]] = load {{.+}}, {{.+}} [[OMP_UB]], + // LAMBDA-64-DAG: [[OMP_PREV_UB_EXT:%.+]] = zext {{.+}} [[OMP_PREV_UB]] to {{.+}} + // check that distlb and distub are properly passed to fork_call + // LAMBDA-64: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_3:@.+]] to {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_EXT]], i{{[0-9]+}} [[OMP_PREV_UB_EXT]], {{.+}}) + // LAMBDA-32: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_3:@.+]] to {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB]], i{{[0-9]+}} [[OMP_PREV_UB]], {{.+}}) + // LAMBDA: br label %[[DIST_INNER_LOOP_INC:.+]] + + // check DistInc + // LAMBDA: [[DIST_INNER_LOOP_INC]]: + // LAMBDA-DAG: [[OMP_IV_VAL_3:%.+]] = load {{.+}}, {{.+}}* [[OMP_IV]], + // LAMBDA-DAG: [[OMP_ST_VAL_1:%.+]] = load {{.+}}, {{.+}}* [[OMP_ST]], + // LAMBDA: [[OMP_IV_INC:%.+]] = add{{.+}} [[OMP_IV_VAL_3]], [[OMP_ST_VAL_1]] + // LAMBDA: store{{.+}} [[OMP_IV_INC]], {{.+}}* [[OMP_IV]], + // 
LAMBDA: br label %[[DIST_INNER_LOOP_HEADER]] + + // LAMBDA: [[DIST_INNER_LOOP_END]]: + // LAMBDA: br label %[[DIST_OUTER_LOOP_INC:.+]] + + // LAMBDA: [[DIST_OUTER_LOOP_INC]]: + // check NextLB and NextUB + // LAMBDA-DAG: [[OMP_LB_VAL_2:%.+]] = load{{.+}}, {{.+}} [[OMP_LB]], + // LAMBDA-DAG: [[OMP_ST_VAL_2:%.+]] = load{{.+}}, {{.+}} [[OMP_ST]], + // LAMBDA-DAG: [[OMP_LB_NEXT:%.+]] = add{{.+}} [[OMP_LB_VAL_2]], [[OMP_ST_VAL_2]] + // LAMBDA: store{{.+}} [[OMP_LB_NEXT]], {{.+}}* [[OMP_LB]], + // LAMBDA-DAG: [[OMP_UB_VAL_5:%.+]] = load{{.+}}, {{.+}} [[OMP_UB]], + // LAMBDA-DAG: [[OMP_ST_VAL_3:%.+]] = load{{.+}}, {{.+}} [[OMP_ST]], + // LAMBDA-DAG: [[OMP_UB_NEXT:%.+]] = add{{.+}} [[OMP_UB_VAL_5]], [[OMP_ST_VAL_3]] + // LAMBDA: store{{.+}} [[OMP_UB_NEXT]], {{.+}}* [[OMP_UB]], + // LAMBDA: br label %[[DIST_OUTER_LOOP_HEADER]] + + // outer loop exit + // LAMBDA: [[DIST_OUTER_LOOP_END]]: + // LAMBDA-DAG: call void @__kmpc_for_static_fini( + // LAMBDA: ret + + // skip implementation of 'parallel for': using default scheduling and was tested above + [&]() { + a[i] = b[i] + c[i]; + }(); + } + + // schedule: static no chunk + #pragma omp target + #pragma omp teams + // LAMBDA: define{{.+}} void [[OFFLOADING_FUN_4]]( + // LAMBDA: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 4, {{.+}}* [[OMP_OUTLINED_4:@.+]] to {{.+}}) + + #pragma omp distribute parallel for schedule(static) + for (int i = 0; i < n; ++i) { + a[i] = b[i] + c[i]; + // LAMBDA: define{{.+}} void [[OMP_OUTLINED_4]]( + // LAMBDA-DAG: [[OMP_IV:%.omp.iv]] = alloca + // LAMBDA-DAG: [[OMP_LB:%.omp.comb.lb]] = alloca + // LAMBDA-DAG: [[OMP_UB:%.omp.comb.ub]] = alloca + // LAMBDA-DAG: [[OMP_ST:%.omp.stride]] = alloca + + // LAMBDA: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, i32 92, + // LAMBDA: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_4:@.+]] to {{.+}}, + // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case + // LAMBDA: ret + + // 
'parallel for' implementation is the same as the case without schedule clause (static no chunk is the default) + // LAMBDA: define{{.+}} void [[OMP_PARFOR_OUTLINED_4]]({{.+}}, {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_IN:%.+]], i{{[0-9]+}} [[OMP_PREV_UB_IN:%.+]], {{.+}}, {{.+}}, {{.+}}, {{.+}}) + + // LAMBDA-DAG: [[OMP_PF_LB:%.omp.lb]] = alloca{{.+}}, + // LAMBDA-DAG: [[OMP_PF_UB:%.omp.ub]] = alloca{{.+}}, + // LAMBDA-DAG: [[OMP_PF_IV:%.omp.iv]] = alloca{{.+}}, + + // initialize lb and ub to PrevLB and PrevUB + // LAMBDA-DAG: store{{.+}} [[OMP_PREV_LB_IN]], {{.+}}* [[PREV_LB_ADDR:%.+]], + // LAMBDA-DAG: store{{.+}} [[OMP_PREV_UB_IN]], {{.+}}* [[PREV_UB_ADDR:%.+]], + // LAMBDA-DAG: [[OMP_PREV_LB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_LB_ADDR]], + // LAMBDA-64-DAG: [[OMP_PREV_LB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_LB_VAL]] to {{.+}} + // LAMBDA-DAG: [[OMP_PREV_UB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], + // LAMBDA-64-DAG: [[OMP_PREV_UB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_UB_VAL]] to {{.+}} + // LAMBDA-64-DAG: store{{.+}} [[OMP_PREV_LB_TRC]], {{.+}}* [[OMP_PF_LB]], + // LAMBDA-32-DAG: store{{.+}} [[OMP_PREV_LB_VAL]], {{.+}}* [[OMP_PF_LB]], + // LAMBDA-64-DAG: store{{.+}} [[OMP_PREV_UB_TRC]], {{.+}}* [[OMP_PF_UB]], + // LAMBDA-32-DAG: store{{.+}} [[OMP_PREV_UB_VAL]], {{.+}}* [[OMP_PF_UB]], + // LAMBDA: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, {{.+}} 34, {{.+}}, {{.+}}* [[OMP_PF_LB]], {{.+}}* [[OMP_PF_UB]],{{.+}}) + + // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used + // In this case we use EUB + // LAMBDA-DAG: [[OMP_PF_UB_VAL_1:%.+]] = load{{.+}} [[OMP_PF_UB]], + // LAMBDA: [[PF_NUM_IT_1:%.+]] = load{{.+}}, + // LAMBDA-DAG: [[PF_CMP_UB_NUM_IT:%.+]] = icmp{{.+}} [[OMP_PF_UB_VAL_1]], [[PF_NUM_IT_1]] + // LAMBDA: br i1 [[PF_CMP_UB_NUM_IT]], label %[[PF_EUB_TRUE:.+]], label %[[PF_EUB_FALSE:.+]] + // LAMBDA: [[PF_EUB_TRUE]]: + // LAMBDA: [[PF_NUM_IT_2:%.+]] = load{{.+}}, + // LAMBDA: br label %[[PF_EUB_END:.+]] + // LAMBDA-DAG: 
[[PF_EUB_FALSE]]: + // LAMBDA: [[OMP_PF_UB_VAL2:%.+]] = load{{.+}} [[OMP_PF_UB]], + // LAMBDA: br label %[[PF_EUB_END]] + // LAMBDA-DAG: [[PF_EUB_END]]: + // LAMBDA-DAG: [[PF_EUB_RES:%.+]] = phi{{.+}} [ [[PF_NUM_IT_2]], %[[PF_EUB_TRUE]] ], [ [[OMP_PF_UB_VAL2]], %[[PF_EUB_FALSE]] ] + // LAMBDA: store{{.+}} [[PF_EUB_RES]],{{.+}} [[OMP_PF_UB]], + + // initialize omp.iv + // LAMBDA: [[OMP_PF_LB_VAL_1:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], + // LAMBDA: store {{.+}} [[OMP_PF_LB_VAL_1]], {{.+}}* [[OMP_PF_IV]], + // LAMBDA: br label %[[OMP_PF_JUMP_BACK:.+]] + + // check exit condition + // LAMBDA: [[OMP_PF_JUMP_BACK]]: + // LAMBDA-DAG: [[OMP_PF_IV_VAL_1:%.+]] = load {{.+}} [[OMP_PF_IV]], + // LAMBDA-DAG: [[OMP_PF_UB_VAL_3:%.+]] = load {{.+}} [[OMP_PF_UB]], + // LAMBDA: [[PF_CMP_IV_UB:%.+]] = icmp sle {{.+}} [[OMP_PF_IV_VAL_1]], [[OMP_PF_UB_VAL_3]] + // LAMBDA: br {{.+}} [[PF_CMP_IV_UB]], label %[[PF_BODY:.+]], label %[[PF_END:.+]] + + // check that PrevLB and PrevUB are passed to the 'for' + // LAMBDA: [[PF_BODY]]: + // LAMBDA-DAG: {{.+}} = load{{.+}}, {{.+}}* [[OMP_PF_IV]], + // LAMBDA: br label {{.+}} + + // check stride 1 for 'for' in 'distribute parallel for' + // LAMBDA-DAG: [[OMP_PF_IV_VAL_2:%.+]] = load {{.+}}, {{.+}}* [[OMP_PF_IV]], + // LAMBDA: [[OMP_PF_IV_INC:%.+]] = add{{.+}} [[OMP_PF_IV_VAL_2]], 1 + // LAMBDA: store{{.+}} [[OMP_PF_IV_INC]], {{.+}}* [[OMP_PF_IV]], + // LAMBDA: br label %[[OMP_PF_JUMP_BACK]] + + // LAMBDA-DAG: call void @__kmpc_for_static_fini( + // LAMBDA: ret + + [&]() { + a[i] = b[i] + c[i]; + }(); + } + + // schedule: static chunk + #pragma omp target + #pragma omp teams + // LAMBDA: define{{.+}} void [[OFFLOADING_FUN_5]]( + // LAMBDA: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 5, {{.+}}* [[OMP_OUTLINED_5:@.+]] to {{.+}}) + + #pragma omp distribute parallel for schedule(static, ch) + for (int i = 0; i < n; ++i) { + a[i] = b[i] + c[i]; + // LAMBDA: define{{.+}} void [[OMP_OUTLINED_5]]( + // LAMBDA: call void 
@__kmpc_for_static_init_4({{.+}}, {{.+}}, i32 92, + // LAMBDA: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_5:@.+]] to {{.+}}, + // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case + // LAMBDA: ret + + // 'parallel for' implementation using outer and inner loops and PrevEUB + // LAMBDA: define{{.+}} void [[OMP_PARFOR_OUTLINED_5]]({{.+}}, {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_IN:%.+]], i{{[0-9]+}} [[OMP_PREV_UB_IN:%.+]], {{.+}}, {{.+}}, {{.+}}, {{.+}}, {{.+}}) + // LAMBDA-DAG: [[OMP_PF_LB:%.omp.lb]] = alloca{{.+}}, + // LAMBDA-DAG: [[OMP_PF_UB:%.omp.ub]] = alloca{{.+}}, + // LAMBDA-DAG: [[OMP_PF_IV:%.omp.iv]] = alloca{{.+}}, + // LAMBDA-DAG: [[OMP_PF_ST:%.omp.stride]] = alloca{{.+}}, + + // initialize lb and ub to PrevLB and PrevUB + // LAMBDA-DAG: store{{.+}} [[OMP_PREV_LB_IN]], {{.+}}* [[PREV_LB_ADDR:%.+]], + // LAMBDA-DAG: store{{.+}} [[OMP_PREV_UB_IN]], {{.+}}* [[PREV_UB_ADDR:%.+]], + // LAMBDA-DAG: [[OMP_PREV_LB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_LB_ADDR]], + // LAMBDA-64-DAG: [[OMP_PREV_LB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_LB_VAL]] to {{.+}} + // LAMBDA-DAG: [[OMP_PREV_UB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], + // LAMBDA-64-DAG: [[OMP_PREV_UB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_UB_VAL]] to {{.+}} + // LAMBDA-64-DAG: store{{.+}} [[OMP_PREV_LB_TRC]], {{.+}}* [[OMP_PF_LB]], + // LAMBDA-32-DAG: store{{.+}} [[OMP_PREV_LB_VAL]], {{.+}}* [[OMP_PF_LB]], + // LAMBDA-64-DAG: store{{.+}} [[OMP_PREV_UB_TRC]], {{.+}}* [[OMP_PF_UB]], + // LAMBDA-32-DAG: store{{.+}} [[OMP_PREV_UB_VAL]], {{.+}}* [[OMP_PF_UB]], + // LAMBDA: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, {{.+}} 33, {{.+}}, {{.+}}* [[OMP_PF_LB]], {{.+}}* [[OMP_PF_UB]],{{.+}}) + // LAMBDA: br label %[[OMP_PF_OUTER_LOOP_HEADER:.+]] + + // check PrevEUB (using PrevUB instead of NumIt as upper bound) + // LAMBDA: [[OMP_PF_OUTER_LOOP_HEADER]]: + // LAMBDA-DAG: [[OMP_PF_UB_VAL_1:%.+]] = load{{.+}} [[OMP_PF_UB]], + // 
LAMBDA-64-DAG: [[OMP_PF_UB_VAL_CONV:%.+]] = sext{{.+}} [[OMP_PF_UB_VAL_1]] to + // LAMBDA: [[PF_PREV_UB_VAL_1:%.+]] = load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], + // LAMBDA-64-DAG: [[PF_CMP_UB_NUM_IT:%.+]] = icmp{{.+}} [[OMP_PF_UB_VAL_CONV]], [[PF_PREV_UB_VAL_1]] + // LAMBDA-32-DAG: [[PF_CMP_UB_NUM_IT:%.+]] = icmp{{.+}} [[OMP_PF_UB_VAL_1]], [[PF_PREV_UB_VAL_1]] + // LAMBDA: br i1 [[PF_CMP_UB_NUM_IT]], label %[[PF_EUB_TRUE:.+]], label %[[PF_EUB_FALSE:.+]] + // LAMBDA: [[PF_EUB_TRUE]]: + // LAMBDA: [[PF_PREV_UB_VAL_2:%.+]] = load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], + // LAMBDA: br label %[[PF_EUB_END:.+]] + // LAMBDA-DAG: [[PF_EUB_FALSE]]: + // LAMBDA: [[OMP_PF_UB_VAL_2:%.+]] = load{{.+}} [[OMP_PF_UB]], + // LAMBDA-64: [[OMP_PF_UB_VAL_2_CONV:%.+]] = sext{{.+}} [[OMP_PF_UB_VAL_2]] to + // LAMBDA: br label %[[PF_EUB_END]] + // LAMBDA-DAG: [[PF_EUB_END]]: + // LAMBDA-64-DAG: [[PF_EUB_RES:%.+]] = phi{{.+}} [ [[PF_PREV_UB_VAL_2]], %[[PF_EUB_TRUE]] ], [ [[OMP_PF_UB_VAL_2_CONV]], %[[PF_EUB_FALSE]] ] + // LAMBDA-32-DAG: [[PF_EUB_RES:%.+]] = phi{{.+}} [ [[PF_PREV_UB_VAL_2]], %[[PF_EUB_TRUE]] ], [ [[OMP_PF_UB_VAL_2]], %[[PF_EUB_FALSE]] ] + // LAMBDA-64-DAG: [[PF_EUB_RES_CONV:%.+]] = trunc{{.+}} [[PF_EUB_RES]] to + // LAMBDA-64: store{{.+}} [[PF_EUB_RES_CONV]],{{.+}} [[OMP_PF_UB]], + // LAMBDA-32: store{{.+}} [[PF_EUB_RES]], {{.+}} [[OMP_PF_UB]], + + // initialize omp.iv (IV = LB) + // LAMBDA: [[OMP_PF_LB_VAL_1:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], + // LAMBDA: store {{.+}} [[OMP_PF_LB_VAL_1]], {{.+}}* [[OMP_PF_IV]], + + // outer loop: while (IV < UB) { + // LAMBDA-DAG: [[OMP_PF_IV_VAL_1:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_IV]], + // LAMBDA-DAG: [[OMP_PF_UB_VAL_3:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_UB]], + // LAMBDA: [[PF_CMP_IV_UB_1:%.+]] = icmp{{.+}} [[OMP_PF_IV_VAL_1]], [[OMP_PF_UB_VAL_3]] + // LAMBDA: br{{.+}} [[PF_CMP_IV_UB_1]], label %[[OMP_PF_OUTER_LOOP_BODY:.+]], label %[[OMP_PF_OUTER_LOOP_END:.+]] + + // LAMBDA: [[OMP_PF_OUTER_LOOP_BODY]]: + // LAMBDA: br label 
%[[OMP_PF_INNER_FOR_HEADER:.+]] + + // LAMBDA: [[OMP_PF_INNER_FOR_HEADER]]: + // LAMBDA-DAG: [[OMP_PF_IV_VAL_2:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_IV]], + // LAMBDA-DAG: [[OMP_PF_UB_VAL_4:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_UB]], + // LAMBDA: [[PF_CMP_IV_UB_2:%.+]] = icmp{{.+}} [[OMP_PF_IV_VAL_2]], [[OMP_PF_UB_VAL_4]] + // LAMBDA: br{{.+}} [[PF_CMP_IV_UB_2]], label %[[OMP_PF_INNER_LOOP_BODY:.+]], label %[[OMP_PF_INNER_LOOP_END:.+]] + + // LAMBDA: [[OMP_PF_INNER_LOOP_BODY]]: + // LAMBDA-DAG: {{.+}} = load{{.+}}, {{.+}}* [[OMP_PF_IV]], + // skip body branch + // LAMBDA: br{{.+}} + // LAMBDA: br label %[[OMP_PF_INNER_LOOP_INC:.+]] + + // IV = IV + 1 and inner loop latch + // LAMBDA: [[OMP_PF_INNER_LOOP_INC]]: + // LAMBDA-DAG: [[OMP_PF_IV_VAL_3:%.+]] = load{{.+}}, {{.+}}* [[OMP_IV]], + // LAMBDA-DAG: [[OMP_PF_NEXT_IV:%.+]] = add{{.+}} [[OMP_PF_IV_VAL_3]], 1 + // LAMBDA-DAG: store{{.+}} [[OMP_PF_NEXT_IV]], {{.+}}* [[OMP_IV]], + // LAMBDA: br label %[[OMP_PF_INNER_FOR_HEADER]] + + // check NextLB and NextUB + // LAMBDA: [[OMP_PF_INNER_LOOP_END]]: + // LAMBDA: br label %[[OMP_PF_OUTER_LOOP_INC:.+]] + + // LAMBDA: [[OMP_PF_OUTER_LOOP_INC]]: + // LAMBDA-DAG: [[OMP_PF_LB_VAL_2:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], + // LAMBDA-DAG: [[OMP_PF_ST_VAL_1:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_ST]], + // LAMBDA-DAG: [[OMP_PF_LB_NEXT:%.+]] = add{{.+}} [[OMP_PF_LB_VAL_2]], [[OMP_PF_ST_VAL_1]] + // LAMBDA: store{{.+}} [[OMP_PF_LB_NEXT]], {{.+}}* [[OMP_PF_LB]], + // LAMBDA-DAG: [[OMP_PF_UB_VAL_5:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_UB]], + // LAMBDA-DAG: [[OMP_PF_ST_VAL_2:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_ST]], + // LAMBDA-DAG: [[OMP_PF_UB_NEXT:%.+]] = add{{.+}} [[OMP_PF_UB_VAL_5]], [[OMP_PF_ST_VAL_2]] + // LAMBDA: store{{.+}} [[OMP_PF_UB_NEXT]], {{.+}}* [[OMP_PF_UB]], + // LAMBDA: br label %[[OMP_PF_OUTER_LOOP_HEADER]] + + // LAMBDA: [[OMP_PF_OUTER_LOOP_END]]: + // LAMBDA-DAG: call void @__kmpc_for_static_fini( + // LAMBDA: ret + [&]() { + a[i] = b[i] + c[i]; + }(); + } + + // 
schedule: dynamic no chunk + #pragma omp target + #pragma omp teams + // LAMBDA: define{{.+}} void [[OFFLOADING_FUN_6]]( + // LAMBDA: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 4, {{.+}}* [[OMP_OUTLINED_6:@.+]] to {{.+}}) + + #pragma omp distribute parallel for schedule(dynamic) + for (int i = 0; i < n; ++i) { + a[i] = b[i] + c[i]; + // LAMBDA: define{{.+}} void [[OMP_OUTLINED_6]]( + // LAMBDA: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, i32 92, + // LAMBDA: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_6:@.+]] to {{.+}}, + // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case + // LAMBDA: ret + + // 'parallel for' implementation using outer and inner loops and PrevEUB + // LAMBDA: define{{.+}} void [[OMP_PARFOR_OUTLINED_6]]({{.+}}, {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_IN:%.+]], i{{[0-9]+}} [[OMP_PREV_UB_IN:%.+]], {{.+}}, {{.+}}, {{.+}}, {{.+}}) + // LAMBDA-DAG: [[OMP_PF_LB:%.omp.lb]] = alloca{{.+}}, + // LAMBDA-DAG: [[OMP_PF_UB:%.omp.ub]] = alloca{{.+}}, + // LAMBDA-DAG: [[OMP_PF_IV:%.omp.iv]] = alloca{{.+}}, + // LAMBDA-DAG: [[OMP_PF_ST:%.omp.stride]] = alloca{{.+}}, + + // initialize lb and ub to PrevLB and PrevUB + // LAMBDA-DAG: store{{.+}} [[OMP_PREV_LB_IN]], {{.+}}* [[PREV_LB_ADDR:%.+]], + // LAMBDA-DAG: store{{.+}} [[OMP_PREV_UB_IN]], {{.+}}* [[PREV_UB_ADDR:%.+]], + // LAMBDA-DAG: [[OMP_PREV_LB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_LB_ADDR]], + // LAMBDA-64-DAG: [[OMP_PREV_LB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_LB_VAL]] to {{.+}} + // LAMBDA-DAG: [[OMP_PREV_UB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], + // LAMBDA-64-DAG: [[OMP_PREV_UB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_UB_VAL]] to {{.+}} + // LAMBDA-64-DAG: store{{.+}} [[OMP_PREV_LB_TRC]], {{.+}}* [[OMP_PF_LB]], + // LAMBDA-64-DAG: store{{.+}} [[OMP_PREV_UB_TRC]], {{.+}}* [[OMP_PF_UB]], + // LAMBDA-32-DAG: store{{.+}} [[OMP_PREV_LB_VAL]], {{.+}}* [[OMP_PF_LB]], + // LAMBDA-32-DAG: store{{.+}} 
[[OMP_PREV_UB_VAL]], {{.+}}* [[OMP_PF_UB]], + // LAMBDA-DAG: [[OMP_PF_LB_VAL:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], + // LAMBDA-DAG: [[OMP_PF_UB_VAL:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_UB]], + // LAMBDA: call void @__kmpc_dispatch_init_4({{.+}}, {{.+}}, {{.+}} 35, {{.+}} [[OMP_PF_LB_VAL]], {{.+}} [[OMP_PF_UB_VAL]], {{.+}}, {{.+}}) + // LAMBDA: br label %[[OMP_PF_OUTER_LOOP_HEADER:.+]] + + // LAMBDA: [[OMP_PF_OUTER_LOOP_HEADER]]: + // LAMBDA: [[IS_FIN:%.+]] = call{{.+}} @__kmpc_dispatch_next_4({{.+}}, {{.+}}, {{.+}}, {{.+}}* [[OMP_PF_LB]], {{.+}}* [[OMP_PF_UB]], {{.+}}* [[OMP_PF_ST]]) + // LAMBDA: [[IS_FIN_CMP:%.+]] = icmp{{.+}} [[IS_FIN]], 0 + // LAMBDA: br{{.+}} [[IS_FIN_CMP]], label %[[OMP_PF_OUTER_LOOP_BODY:.+]], label %[[OMP_PF_OUTER_LOOP_END:.+]] + + // initialize omp.iv (IV = LB) + // LAMBDA: [[OMP_PF_OUTER_LOOP_BODY]]: + // LAMBDA-DAG: [[OMP_PF_LB_VAL_1:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], + // LAMBDA-DAG: store {{.+}} [[OMP_PF_LB_VAL_1]], {{.+}}* [[OMP_PF_IV]], + // LAMBDA: br label %[[OMP_PF_INNER_LOOP_HEADER:.+]] + + // LAMBDA: [[OMP_PF_INNER_LOOP_HEADER]]: + // LAMBDA-DAG: [[OMP_PF_IV_VAL_2:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_IV]], + // LAMBDA-DAG: [[OMP_PF_UB_VAL_4:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_UB]], + // LAMBDA: [[PF_CMP_IV_UB_2:%.+]] = icmp{{.+}} [[OMP_PF_IV_VAL_2]], [[OMP_PF_UB_VAL_4]] + // LAMBDA: br{{.+}} [[PF_CMP_IV_UB_2]], label %[[OMP_PF_INNER_LOOP_BODY:.+]], label %[[OMP_PF_INNER_LOOP_END:.+]] + + // LAMBDA: [[OMP_PF_INNER_LOOP_BODY]]: + // LAMBDA-DAG: {{.+}} = load{{.+}}, {{.+}}* [[OMP_PF_IV]], + // skip body branch + // LAMBDA: br{{.+}} + // LAMBDA: br label %[[OMP_PF_INNER_LOOP_INC:.+]] + + // IV = IV + 1 and inner loop latch + // LAMBDA: [[OMP_PF_INNER_LOOP_INC]]: + // LAMBDA-DAG: [[OMP_PF_IV_VAL_3:%.+]] = load{{.+}}, {{.+}}* [[OMP_IV]], + // LAMBDA-DAG: [[OMP_PF_NEXT_IV:%.+]] = add{{.+}} [[OMP_PF_IV_VAL_3]], 1 + // LAMBDA-DAG: store{{.+}} [[OMP_PF_NEXT_IV]], {{.+}}* [[OMP_IV]], + // LAMBDA: br label 
%[[OMP_PF_INNER_LOOP_HEADER]] + + // check NextLB and NextUB + // LAMBDA: [[OMP_PF_INNER_LOOP_END]]: + // LAMBDA: br label %[[OMP_PF_OUTER_LOOP_INC:.+]] + + // LAMBDA: [[OMP_PF_OUTER_LOOP_INC]]: + // LAMBDA: br label %[[OMP_PF_OUTER_LOOP_HEADER]] + + // LAMBDA: [[OMP_PF_OUTER_LOOP_END]]: + // LAMBDA: ret + [&]() { + a[i] = b[i] + c[i]; + }(); + } + + // schedule: dynamic chunk + #pragma omp target + #pragma omp teams + // LAMBDA: define{{.+}} void [[OFFLOADING_FUN_7]]( + // LAMBDA: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 5, {{.+}}* [[OMP_OUTLINED_7:@.+]] to {{.+}}) + + #pragma omp distribute parallel for schedule(dynamic, ch) + for (int i = 0; i < n; ++i) { + a[i] = b[i] + c[i]; + // LAMBDA: define{{.+}} void [[OMP_OUTLINED_7]]( + // LAMBDA: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, i32 92, + // LAMBDA: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_7:@.+]] to {{.+}}, + // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case + // LAMBDA: ret + + // 'parallel for' implementation using outer and inner loops and PrevEUB + // LAMBDA: define{{.+}} void [[OMP_PARFOR_OUTLINED_7]]({{.+}}, {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_IN:%.+]], i{{[0-9]+}} [[OMP_PREV_UB_IN:%.+]], {{.+}}, {{.+}}, {{.+}}, {{.+}}, {{.+}}) + // LAMBDA-DAG: [[OMP_PF_LB:%.omp.lb]] = alloca{{.+}}, + // LAMBDA-DAG: [[OMP_PF_UB:%.omp.ub]] = alloca{{.+}}, + // LAMBDA-DAG: [[OMP_PF_IV:%.omp.iv]] = alloca{{.+}}, + // LAMBDA-DAG: [[OMP_PF_ST:%.omp.stride]] = alloca{{.+}}, + + // initialize lb and ub to PrevLB and PrevUB + // LAMBDA-DAG: store{{.+}} [[OMP_PREV_LB_IN]], {{.+}}* [[PREV_LB_ADDR:%.+]], + // LAMBDA-DAG: store{{.+}} [[OMP_PREV_UB_IN]], {{.+}}* [[PREV_UB_ADDR:%.+]], + // LAMBDA-DAG: [[OMP_PREV_LB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_LB_ADDR]], + // LAMBDA-64-DAG: [[OMP_PREV_LB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_LB_VAL]] to {{.+}} + // LAMBDA-DAG: [[OMP_PREV_UB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], + 
// LAMBDA-64-DAG: [[OMP_PREV_UB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_UB_VAL]] to {{.+}} + // LAMBDA-64-DAG: store{{.+}} [[OMP_PREV_LB_TRC]], {{.+}}* [[OMP_PF_LB]], + // LAMBDA-64-DAG: store{{.+}} [[OMP_PREV_UB_TRC]], {{.+}}* [[OMP_PF_UB]], + // LAMBDA-32-DAG: store{{.+}} [[OMP_PREV_LB_VAL]], {{.+}}* [[OMP_PF_LB]], + // LAMBDA-32-DAG: store{{.+}} [[OMP_PREV_UB_VAL]], {{.+}}* [[OMP_PF_UB]], + // LAMBDA-DAG: [[OMP_PF_LB_VAL:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], + // LAMBDA-DAG: [[OMP_PF_UB_VAL:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_UB]], + // LAMBDA: call void @__kmpc_dispatch_init_4({{.+}}, {{.+}}, {{.+}} 35, {{.+}} [[OMP_PF_LB_VAL]], {{.+}} [[OMP_PF_UB_VAL]], {{.+}}, {{.+}}) + // LAMBDA: br label %[[OMP_PF_OUTER_LOOP_HEADER:.+]] + + // LAMBDA: [[OMP_PF_OUTER_LOOP_HEADER]]: + // LAMBDA: [[IS_FIN:%.+]] = call{{.+}} @__kmpc_dispatch_next_4({{.+}}, {{.+}}, {{.+}}, {{.+}}* [[OMP_PF_LB]], {{.+}}* [[OMP_PF_UB]], {{.+}}* [[OMP_PF_ST]]) + // LAMBDA: [[IS_FIN_CMP:%.+]] = icmp{{.+}} [[IS_FIN]], 0 + // LAMBDA: br{{.+}} [[IS_FIN_CMP]], label %[[OMP_PF_OUTER_LOOP_BODY:.+]], label %[[OMP_PF_OUTER_LOOP_END:.+]] + + // initialize omp.iv (IV = LB) + // LAMBDA: [[OMP_PF_OUTER_LOOP_BODY]]: + // LAMBDA-DAG: [[OMP_PF_LB_VAL_1:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], + // LAMBDA-DAG: store {{.+}} [[OMP_PF_LB_VAL_1]], {{.+}}* [[OMP_PF_IV]], + // LAMBDA: br label %[[OMP_PF_INNER_LOOP_HEADER:.+]] + + // LAMBDA: [[OMP_PF_INNER_LOOP_HEADER]]: + // LAMBDA-DAG: [[OMP_PF_IV_VAL_2:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_IV]], + // LAMBDA-DAG: [[OMP_PF_UB_VAL_4:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_UB]], + // LAMBDA: [[PF_CMP_IV_UB_2:%.+]] = icmp{{.+}} [[OMP_PF_IV_VAL_2]], [[OMP_PF_UB_VAL_4]] + // LAMBDA: br{{.+}} [[PF_CMP_IV_UB_2]], label %[[OMP_PF_INNER_LOOP_BODY:.+]], label %[[OMP_PF_INNER_LOOP_END:.+]] + + // LAMBDA: [[OMP_PF_INNER_LOOP_BODY]]: + // LAMBDA-DAG: {{.+}} = load{{.+}}, {{.+}}* [[OMP_PF_IV]], + // skip body branch + // LAMBDA: br{{.+}} + // LAMBDA: br label %[[OMP_PF_INNER_LOOP_INC:.+]] 
+ + // IV = IV + 1 and inner loop latch + // LAMBDA: [[OMP_PF_INNER_LOOP_INC]]: + // LAMBDA-DAG: [[OMP_PF_IV_VAL_3:%.+]] = load{{.+}}, {{.+}}* [[OMP_IV]], + // LAMBDA-DAG: [[OMP_PF_NEXT_IV:%.+]] = add{{.+}} [[OMP_PF_IV_VAL_3]], 1 + // LAMBDA-DAG: store{{.+}} [[OMP_PF_NEXT_IV]], {{.+}}* [[OMP_IV]], + // LAMBDA: br label %[[OMP_PF_INNER_LOOP_HEADER]] + + // check NextLB and NextUB + // LAMBDA: [[OMP_PF_INNER_LOOP_END]]: + // LAMBDA: br label %[[OMP_PF_OUTER_LOOP_INC:.+]] + + // LAMBDA: [[OMP_PF_OUTER_LOOP_INC]]: + // LAMBDA: br label %[[OMP_PF_OUTER_LOOP_HEADER]] + + // LAMBDA: [[OMP_PF_OUTER_LOOP_END]]: + // LAMBDA: ret + [&]() { + a[i] = b[i] + c[i]; + }(); + } + }(); + return 0; +#else + // CHECK-LABEL: @main + + // CHECK: call i{{[0-9]+}} @__tgt_target_teams( + // CHECK: call void [[OFFLOADING_FUN_1:@.+]]( + + // CHECK: call i{{[0-9]+}} @__tgt_target_teams( + // CHECK: call void [[OFFLOADING_FUN_2:@.+]]( + + // CHECK: call i{{[0-9]+}} @__tgt_target_teams( + // CHECK: call void [[OFFLOADING_FUN_3:@.+]]( + + // CHECK: call i{{[0-9]+}} @__tgt_target_teams( + // CHECK: call void [[OFFLOADING_FUN_4:@.+]]( + + // CHECK: call i{{[0-9]+}} @__tgt_target_teams( + // CHECK: call void [[OFFLOADING_FUN_5:@.+]]( + + // CHECK: call i{{[0-9]+}} @__tgt_target_teams( + // CHECK: call void [[OFFLOADING_FUN_6:@.+]]( + + // CHECK: call i{{[0-9]+}} @__tgt_target_teams( + // CHECK: call void [[OFFLOADING_FUN_7:@.+]]( + + // CHECK: call{{.+}} [[TMAIN:@.+]]() + + // no schedule clauses + #pragma omp target + #pragma omp teams + // CHECK: define internal void [[OFFLOADING_FUN_1]]( + // CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 4, {{.+}}* [[OMP_OUTLINED_1:@.+]] to {{.+}}) + + #pragma omp distribute parallel for + for (int i = 0; i < n; ++i) { + a[i] = b[i] + c[i]; + // CHECK: define{{.+}} void [[OMP_OUTLINED_1]]( + // CHECK-DAG: [[OMP_IV:%.omp.iv]] = alloca + // CHECK-DAG: [[OMP_LB:%.omp.comb.lb]] = alloca + // CHECK-DAG: [[OMP_UB:%.omp.comb.ub]] = alloca + // CHECK-DAG: 
[[OMP_ST:%.omp.stride]] = alloca + + // CHECK: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, i32 92, + + // check EUB for distribute + // CHECK-DAG: [[OMP_UB_VAL_1:%.+]] = load{{.+}} [[OMP_UB]], + // CHECK: [[NUM_IT_1:%.+]] = load{{.+}}, + // CHECK-DAG: [[CMP_UB_NUM_IT:%.+]] = icmp sgt {{.+}} [[OMP_UB_VAL_1]], [[NUM_IT_1]] + // CHECK: br {{.+}} [[CMP_UB_NUM_IT]], label %[[EUB_TRUE:.+]], label %[[EUB_FALSE:.+]] + // CHECK-DAG: [[EUB_TRUE]]: + // CHECK: [[NUM_IT_2:%.+]] = load{{.+}}, + // CHECK: br label %[[EUB_END:.+]] + // CHECK-DAG: [[EUB_FALSE]]: + // CHECK: [[OMP_UB_VAL2:%.+]] = load{{.+}} [[OMP_UB]], + // CHECK: br label %[[EUB_END]] + // CHECK-DAG: [[EUB_END]]: + // CHECK-DAG: [[EUB_RES:%.+]] = phi{{.+}} [ [[NUM_IT_2]], %[[EUB_TRUE]] ], [ [[OMP_UB_VAL2]], %[[EUB_FALSE]] ] + // CHECK: store{{.+}} [[EUB_RES]], {{.+}}* [[OMP_UB]], + + // initialize omp.iv + // CHECK: [[OMP_LB_VAL_1:%.+]] = load{{.+}}, {{.+}}* [[OMP_LB]], + // CHECK: store {{.+}} [[OMP_LB_VAL_1]], {{.+}}* [[OMP_IV]], + // CHECK: br label %[[OMP_JUMP_BACK:.+]] + + // check exit condition + // CHECK: [[OMP_JUMP_BACK]]: + // CHECK-DAG: [[OMP_IV_VAL_1:%.+]] = load {{.+}} [[OMP_IV]], + // CHECK-DAG: [[OMP_UB_VAL_3:%.+]] = load {{.+}} [[OMP_UB]], + // CHECK: [[CMP_IV_UB:%.+]] = icmp sle {{.+}} [[OMP_IV_VAL_1]], [[OMP_UB_VAL_3]] + // CHECK: br {{.+}} [[CMP_IV_UB]], label %[[DIST_BODY:.+]], label %[[DIST_END:.+]] + + // check that PrevLB and PrevUB are passed to the 'for' + // CHECK: [[DIST_BODY]]: + // CHECK-DAG: [[OMP_PREV_LB:%.+]] = load {{.+}}, {{.+}} [[OMP_LB]], + // CHECK-64-DAG: [[OMP_PREV_LB_EXT:%.+]] = zext {{.+}} [[OMP_PREV_LB]] to {{.+}} + // CHECK-DAG: [[OMP_PREV_UB:%.+]] = load {{.+}}, {{.+}} [[OMP_UB]], + // CHECK-64-DAG: [[OMP_PREV_UB_EXT:%.+]] = zext {{.+}} [[OMP_PREV_UB]] to {{.+}} + // check that distlb and distub are properly passed to fork_call + // CHECK-64: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_1:@.+]] to {{.+}}, i{{[0-9]+}} 
[[OMP_PREV_LB_EXT]], i{{[0-9]+}} [[OMP_PREV_UB_EXT]], {{.+}}) + // CHECK-32: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_1:@.+]] to {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB]], i{{[0-9]+}} [[OMP_PREV_UB]], {{.+}}) + // CHECK: br label %[[DIST_INC:.+]] + + // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch + // CHECK: [[DIST_INC]]: + // CHECK-DAG: [[OMP_IV_VAL_2:%.+]] = load {{.+}}, {{.+}}* [[OMP_IV]], + // CHECK-DAG: [[OMP_ST_VAL_1:%.+]] = load {{.+}}, {{.+}}* [[OMP_ST]], + // CHECK: [[OMP_IV_INC:%.+]] = add{{.+}} [[OMP_IV_VAL_2]], [[OMP_ST_VAL_1]] + // CHECK: store{{.+}} [[OMP_IV_INC]], {{.+}}* [[OMP_IV]], + // CHECK: br label %[[OMP_JUMP_BACK]] + + // CHECK-DAG: call void @__kmpc_for_static_fini( + // CHECK: ret + + // implementation of 'parallel for' + // CHECK: define{{.+}} void [[OMP_PARFOR_OUTLINED_1]]({{.+}}, {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_IN:%.+]], i{{[0-9]+}} [[OMP_PREV_UB_IN:%.+]], {{.+}}, {{.+}}, {{.+}}, {{.+}}) + + // CHECK-DAG: [[OMP_PF_LB:%.omp.lb]] = alloca{{.+}}, + // CHECK-DAG: [[OMP_PF_UB:%.omp.ub]] = alloca{{.+}}, + // CHECK-DAG: [[OMP_PF_IV:%.omp.iv]] = alloca{{.+}}, + + // initialize lb and ub to PrevLB and PrevUB + // CHECK-DAG: store{{.+}} [[OMP_PREV_LB_IN]], {{.+}}* [[PREV_LB_ADDR:%.+]], + // CHECK-DAG: store{{.+}} [[OMP_PREV_UB_IN]], {{.+}}* [[PREV_UB_ADDR:%.+]], + // CHECK-DAG: [[OMP_PREV_LB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_LB_ADDR]], + // CHECK-64-DAG: [[OMP_PREV_LB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_LB_VAL]] to {{.+}} + // CHECK-DAG: [[OMP_PREV_UB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], + // CHECK-64-DAG: [[OMP_PREV_UB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_UB_VAL]] to {{.+}} + // CHECK-64-DAG: store{{.+}} [[OMP_PREV_LB_TRC]], {{.+}}* [[OMP_PF_LB]], + // CHECK-32-DAG: store{{.+}} [[OMP_PREV_LB_VAL]], {{.+}}* [[OMP_PF_LB]], + // CHECK-64-DAG: store{{.+}} [[OMP_PREV_UB_TRC]], {{.+}}* [[OMP_PF_UB]], + // CHECK-32-DAG: store{{.+}} [[OMP_PREV_UB_VAL]], {{.+}}* 
[[OMP_PF_UB]], + // CHECK: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, {{.+}} 34, {{.+}}, {{.+}}* [[OMP_PF_LB]], {{.+}}* [[OMP_PF_UB]],{{.+}}) + + // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used + // In this case we use EUB + // CHECK-DAG: [[OMP_PF_UB_VAL_1:%.+]] = load{{.+}} [[OMP_PF_UB]], + // CHECK: [[PF_NUM_IT_1:%.+]] = load{{.+}}, + // CHECK-DAG: [[PF_CMP_UB_NUM_IT:%.+]] = icmp{{.+}} [[OMP_PF_UB_VAL_1]], [[PF_NUM_IT_1]] + // CHECK: br i1 [[PF_CMP_UB_NUM_IT]], label %[[PF_EUB_TRUE:.+]], label %[[PF_EUB_FALSE:.+]] + // CHECK: [[PF_EUB_TRUE]]: + // CHECK: [[PF_NUM_IT_2:%.+]] = load{{.+}}, + // CHECK: br label %[[PF_EUB_END:.+]] + // CHECK-DAG: [[PF_EUB_FALSE]]: + // CHECK: [[OMP_PF_UB_VAL2:%.+]] = load{{.+}} [[OMP_PF_UB]], + // CHECK: br label %[[PF_EUB_END]] + // CHECK-DAG: [[PF_EUB_END]]: + // CHECK-DAG: [[PF_EUB_RES:%.+]] = phi{{.+}} [ [[PF_NUM_IT_2]], %[[PF_EUB_TRUE]] ], [ [[OMP_PF_UB_VAL2]], %[[PF_EUB_FALSE]] ] + // CHECK: store{{.+}} [[PF_EUB_RES]],{{.+}} [[OMP_PF_UB]], + + // initialize omp.iv + // CHECK: [[OMP_PF_LB_VAL_1:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], + // CHECK: store {{.+}} [[OMP_PF_LB_VAL_1]], {{.+}}* [[OMP_PF_IV]], + // CHECK: br label %[[OMP_PF_JUMP_BACK:.+]] + + // check exit condition + // CHECK: [[OMP_PF_JUMP_BACK]]: + // CHECK-DAG: [[OMP_PF_IV_VAL_1:%.+]] = load {{.+}} [[OMP_PF_IV]], + // CHECK-DAG: [[OMP_PF_UB_VAL_3:%.+]] = load {{.+}} [[OMP_PF_UB]], + // CHECK: [[PF_CMP_IV_UB:%.+]] = icmp sle {{.+}} [[OMP_PF_IV_VAL_1]], [[OMP_PF_UB_VAL_3]] + // CHECK: br {{.+}} [[PF_CMP_IV_UB]], label %[[PF_BODY:.+]], label %[[PF_END:.+]] + + // check that PrevLB and PrevUB are passed to the 'for' + // CHECK: [[PF_BODY]]: + // CHECK-DAG: {{.+}} = load{{.+}}, {{.+}}* [[OMP_PF_IV]], + // CHECK: br label {{.+}} + + // check stride 1 for 'for' in 'distribute parallel for' + // CHECK-DAG: [[OMP_PF_IV_VAL_2:%.+]] = load {{.+}}, {{.+}}* [[OMP_PF_IV]], + // CHECK: [[OMP_PF_IV_INC:%.+]] = add{{.+}} 
[[OMP_PF_IV_VAL_2]], 1 + // CHECK: store{{.+}} [[OMP_PF_IV_INC]], {{.+}}* [[OMP_PF_IV]], + // CHECK: br label %[[OMP_PF_JUMP_BACK]] + + // CHECK-DAG: call void @__kmpc_for_static_fini( + // CHECK: ret + } + + // dist_schedule: static no chunk + #pragma omp target + #pragma omp teams + // CHECK: define{{.+}} void [[OFFLOADING_FUN_2]]( + // CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 4, {{.+}}* [[OMP_OUTLINED_2:@.+]] to {{.+}}) + + #pragma omp distribute parallel for dist_schedule(static) + for (int i = 0; i < n; ++i) { + a[i] = b[i] + c[i]; + // CHECK: define{{.+}} void [[OMP_OUTLINED_2]]( + // CHECK-DAG: [[OMP_IV:%.omp.iv]] = alloca + // CHECK-DAG: [[OMP_LB:%.omp.comb.lb]] = alloca + // CHECK-DAG: [[OMP_UB:%.omp.comb.ub]] = alloca + // CHECK-DAG: [[OMP_ST:%.omp.stride]] = alloca + + // CHECK: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, i32 92, + + // check EUB for distribute + // CHECK-DAG: [[OMP_UB_VAL_1:%.+]] = load{{.+}} [[OMP_UB]], + // CHECK: [[NUM_IT_1:%.+]] = load{{.+}}, + // CHECK-DAG: [[CMP_UB_NUM_IT:%.+]] = icmp sgt {{.+}} [[OMP_UB_VAL_1]], [[NUM_IT_1]] + // CHECK: br {{.+}} [[CMP_UB_NUM_IT]], label %[[EUB_TRUE:.+]], label %[[EUB_FALSE:.+]] + // CHECK-DAG: [[EUB_TRUE]]: + // CHECK: [[NUM_IT_2:%.+]] = load{{.+}}, + // CHECK: br label %[[EUB_END:.+]] + // CHECK-DAG: [[EUB_FALSE]]: + // CHECK: [[OMP_UB_VAL2:%.+]] = load{{.+}} [[OMP_UB]], + // CHECK: br label %[[EUB_END]] + // CHECK-DAG: [[EUB_END]]: + // CHECK-DAG: [[EUB_RES:%.+]] = phi{{.+}} [ [[NUM_IT_2]], %[[EUB_TRUE]] ], [ [[OMP_UB_VAL2]], %[[EUB_FALSE]] ] + // CHECK: store{{.+}} [[EUB_RES]], {{.+}}* [[OMP_UB]], + + // initialize omp.iv + // CHECK: [[OMP_LB_VAL_1:%.+]] = load{{.+}}, {{.+}}* [[OMP_LB]], + // CHECK: store {{.+}} [[OMP_LB_VAL_1]], {{.+}}* [[OMP_IV]], + // CHECK: br label %[[OMP_JUMP_BACK:.+]] + + // check exit condition + // CHECK: [[OMP_JUMP_BACK]]: + // CHECK-DAG: [[OMP_IV_VAL_1:%.+]] = load {{.+}} [[OMP_IV]], + // CHECK-DAG: [[OMP_UB_VAL_3:%.+]] = load {{.+}} 
[[OMP_UB]], + // CHECK: [[CMP_IV_UB:%.+]] = icmp sle {{.+}} [[OMP_IV_VAL_1]], [[OMP_UB_VAL_3]] + // CHECK: br {{.+}} [[CMP_IV_UB]], label %[[DIST_BODY:.+]], label %[[DIST_END:.+]] + + // check that PrevLB and PrevUB are passed to the 'for' + // CHECK: [[DIST_BODY]]: + // CHECK-DAG: [[OMP_PREV_LB:%.+]] = load {{.+}}, {{.+}} [[OMP_LB]], + // CHECK-64-DAG: [[OMP_PREV_LB_EXT:%.+]] = zext {{.+}} [[OMP_PREV_LB]] to {{.+}} + // CHECK-DAG: [[OMP_PREV_UB:%.+]] = load {{.+}}, {{.+}} [[OMP_UB]], + // CHECK-64-DAG: [[OMP_PREV_UB_EXT:%.+]] = zext {{.+}} [[OMP_PREV_UB]] to {{.+}} + // check that distlb and distub are properly passed to fork_call + // CHECK-64: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_2:@.+]] to {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_EXT]], i{{[0-9]+}} [[OMP_PREV_UB_EXT]], {{.+}}) + // CHECK-32: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_2:@.+]] to {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB]], i{{[0-9]+}} [[OMP_PREV_UB]], {{.+}}) + // CHECK: br label %[[DIST_INC:.+]] + + // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch + // CHECK: [[DIST_INC]]: + // CHECK-DAG: [[OMP_IV_VAL_2:%.+]] = load {{.+}}, {{.+}}* [[OMP_IV]], + // CHECK-DAG: [[OMP_ST_VAL_1:%.+]] = load {{.+}}, {{.+}}* [[OMP_ST]], + // CHECK: [[OMP_IV_INC:%.+]] = add{{.+}} [[OMP_IV_VAL_2]], [[OMP_ST_VAL_1]] + // CHECK: store{{.+}} [[OMP_IV_INC]], {{.+}}* [[OMP_IV]], + // CHECK: br label %[[OMP_JUMP_BACK]] + + // CHECK-DAG: call void @__kmpc_for_static_fini( + // CHECK: ret + + // implementation of 'parallel for' + // CHECK: define{{.+}} void [[OMP_PARFOR_OUTLINED_2]]({{.+}}, {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_IN:%.+]], i{{[0-9]+}} [[OMP_PREV_UB_IN:%.+]], {{.+}}, {{.+}}, {{.+}}, {{.+}}) + + // CHECK-DAG: [[OMP_PF_LB:%.omp.lb]] = alloca{{.+}}, + // CHECK-DAG: [[OMP_PF_UB:%.omp.ub]] = alloca{{.+}}, + // CHECK-DAG: [[OMP_PF_IV:%.omp.iv]] = alloca{{.+}}, + + // initialize lb and ub to PrevLB and PrevUB + // CHECK-DAG: store{{.+}} 
[[OMP_PREV_LB_IN]], {{.+}}* [[PREV_LB_ADDR:%.+]], + // CHECK-DAG: store{{.+}} [[OMP_PREV_UB_IN]], {{.+}}* [[PREV_UB_ADDR:%.+]], + // CHECK-DAG: [[OMP_PREV_LB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_LB_ADDR]], + // CHECK-64-DAG: [[OMP_PREV_LB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_LB_VAL]] to {{.+}} + // CHECK-DAG: [[OMP_PREV_UB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], + // CHECK-64-DAG: [[OMP_PREV_UB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_UB_VAL]] to {{.+}} + // CHECK-64-DAG: store{{.+}} [[OMP_PREV_LB_TRC]], {{.+}}* [[OMP_PF_LB]], + // CHECK-32-DAG: store{{.+}} [[OMP_PREV_LB_VAL]], {{.+}}* [[OMP_PF_LB]], + // CHECK-64-DAG: store{{.+}} [[OMP_PREV_UB_TRC]], {{.+}}* [[OMP_PF_UB]], + // CHECK-32-DAG: store{{.+}} [[OMP_PREV_UB_VAL]], {{.+}}* [[OMP_PF_UB]], + // CHECK: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, {{.+}} 34, {{.+}}, {{.+}}* [[OMP_PF_LB]], {{.+}}* [[OMP_PF_UB]],{{.+}}) + + // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used + // In this case we use EUB + // CHECK-DAG: [[OMP_PF_UB_VAL_1:%.+]] = load{{.+}} [[OMP_PF_UB]], + // CHECK: [[PF_NUM_IT_1:%.+]] = load{{.+}}, + // CHECK-DAG: [[PF_CMP_UB_NUM_IT:%.+]] = icmp{{.+}} [[OMP_PF_UB_VAL_1]], [[PF_NUM_IT_1]] + // CHECK: br i1 [[PF_CMP_UB_NUM_IT]], label %[[PF_EUB_TRUE:.+]], label %[[PF_EUB_FALSE:.+]] + // CHECK: [[PF_EUB_TRUE]]: + // CHECK: [[PF_NUM_IT_2:%.+]] = load{{.+}}, + // CHECK: br label %[[PF_EUB_END:.+]] + // CHECK-DAG: [[PF_EUB_FALSE]]: + // CHECK: [[OMP_PF_UB_VAL2:%.+]] = load{{.+}} [[OMP_PF_UB]], + // CHECK: br label %[[PF_EUB_END]] + // CHECK-DAG: [[PF_EUB_END]]: + // CHECK-DAG: [[PF_EUB_RES:%.+]] = phi{{.+}} [ [[PF_NUM_IT_2]], %[[PF_EUB_TRUE]] ], [ [[OMP_PF_UB_VAL2]], %[[PF_EUB_FALSE]] ] + // CHECK: store{{.+}} [[PF_EUB_RES]],{{.+}} [[OMP_PF_UB]], + + // initialize omp.iv + // CHECK: [[OMP_PF_LB_VAL_1:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], + // CHECK: store {{.+}} [[OMP_PF_LB_VAL_1]], {{.+}}* [[OMP_PF_IV]], + // CHECK: br label %[[OMP_PF_JUMP_BACK:.+]] + + // 
check exit condition + // CHECK: [[OMP_PF_JUMP_BACK]]: + // CHECK-DAG: [[OMP_PF_IV_VAL_1:%.+]] = load {{.+}} [[OMP_PF_IV]], + // CHECK-DAG: [[OMP_PF_UB_VAL_3:%.+]] = load {{.+}} [[OMP_PF_UB]], + // CHECK: [[PF_CMP_IV_UB:%.+]] = icmp sle {{.+}} [[OMP_PF_IV_VAL_1]], [[OMP_PF_UB_VAL_3]] + // CHECK: br {{.+}} [[PF_CMP_IV_UB]], label %[[PF_BODY:.+]], label %[[PF_END:.+]] + + // check that PrevLB and PrevUB are passed to the 'for' + // CHECK: [[PF_BODY]]: + // CHECK-DAG: {{.+}} = load{{.+}}, {{.+}}* [[OMP_PF_IV]], + // CHECK: br label {{.+}} + + // check stride 1 for 'for' in 'distribute parallel for' + // CHECK-DAG: [[OMP_PF_IV_VAL_2:%.+]] = load {{.+}}, {{.+}}* [[OMP_PF_IV]], + // CHECK: [[OMP_PF_IV_INC:%.+]] = add{{.+}} [[OMP_PF_IV_VAL_2]], 1 + // CHECK: store{{.+}} [[OMP_PF_IV_INC]], {{.+}}* [[OMP_PF_IV]], + // CHECK: br label %[[OMP_PF_JUMP_BACK]] + + // CHECK-DAG: call void @__kmpc_for_static_fini( + // CHECK: ret + } + + // dist_schedule: static chunk + #pragma omp target + #pragma omp teams + // CHECK: define{{.+}} void [[OFFLOADING_FUN_3]]( + // CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 5, {{.+}}* [[OMP_OUTLINED_3:@.+]] to {{.+}}) + + #pragma omp distribute parallel for dist_schedule(static, ch) + for (int i = 0; i < n; ++i) { + a[i] = b[i] + c[i]; + // CHECK: define{{.+}} void [[OMP_OUTLINED_3]]( + // CHECK-DAG: [[OMP_IV:%.omp.iv]] = alloca + // CHECK-DAG: [[OMP_LB:%.omp.comb.lb]] = alloca + // CHECK-DAG: [[OMP_UB:%.omp.comb.ub]] = alloca + // CHECK-DAG: [[OMP_ST:%.omp.stride]] = alloca + + // unlike the previous tests, in this one we have a outer and inner loop for 'distribute' + // CHECK: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, i32 91, + // CHECK: br label %[[DIST_OUTER_LOOP_HEADER:.+]] + + // CHECK: [[DIST_OUTER_LOOP_HEADER]]: + // check EUB for distribute + // CHECK-DAG: [[OMP_UB_VAL_1:%.+]] = load{{.+}} [[OMP_UB]], + // CHECK: [[NUM_IT_1:%.+]] = load{{.+}}, + // CHECK-DAG: [[CMP_UB_NUM_IT:%.+]] = icmp sgt {{.+}} 
[[OMP_UB_VAL_1]], [[NUM_IT_1]] + // CHECK: br {{.+}} [[CMP_UB_NUM_IT]], label %[[EUB_TRUE:.+]], label %[[EUB_FALSE:.+]] + // CHECK-DAG: [[EUB_TRUE]]: + // CHECK: [[NUM_IT_2:%.+]] = load{{.+}}, + // CHECK: br label %[[EUB_END:.+]] + // CHECK-DAG: [[EUB_FALSE]]: + // CHECK: [[OMP_UB_VAL2:%.+]] = load{{.+}} [[OMP_UB]], + // CHECK: br label %[[EUB_END]] + // CHECK-DAG: [[EUB_END]]: + // CHECK-DAG: [[EUB_RES:%.+]] = phi{{.+}} [ [[NUM_IT_2]], %[[EUB_TRUE]] ], [ [[OMP_UB_VAL2]], %[[EUB_FALSE]] ] + // CHECK: store{{.+}} [[EUB_RES]], {{.+}}* [[OMP_UB]], + + // initialize omp.iv + // CHECK: [[OMP_LB_VAL_1:%.+]] = load{{.+}}, {{.+}}* [[OMP_LB]], + // CHECK: store {{.+}} [[OMP_LB_VAL_1]], {{.+}}* [[OMP_IV]], + + // check exit condition + // CHECK-DAG: [[OMP_IV_VAL_1:%.+]] = load {{.+}} [[OMP_IV]], + // CHECK-DAG: [[OMP_UB_VAL_3:%.+]] = load {{.+}} [[OMP_UB]], + // CHECK: [[CMP_IV_UB:%.+]] = icmp sle {{.+}} [[OMP_IV_VAL_1]], [[OMP_UB_VAL_3]] + // CHECK: br {{.+}} [[CMP_IV_UB]], label %[[DIST_OUTER_LOOP_BODY:.+]], label %[[DIST_OUTER_LOOP_END:.+]] + + // CHECK: [[DIST_OUTER_LOOP_BODY]]: + // CHECK: br label %[[DIST_INNER_LOOP_HEADER:.+]] + + // CHECK: [[DIST_INNER_LOOP_HEADER]]: + // CHECK-DAG: [[OMP_IV_VAL_2:%.+]] = load {{.+}} [[OMP_IV]], + // CHECK-DAG: [[OMP_UB_VAL_4:%.+]] = load {{.+}} [[OMP_UB]], + // CHECK: [[CMP_IV_UB_2:%.+]] = icmp sle {{.+}} [[OMP_IV_VAL_2]], [[OMP_UB_VAL_4]] + // CHECK: br{{.+}} [[CMP_IV_UB_2]], label %[[DIST_INNER_LOOP_BODY:.+]], label %[[DIST_INNER_LOOP_END:.+]] + + // check that PrevLB and PrevUB are passed to the 'for' + // CHECK: [[DIST_INNER_LOOP_BODY]]: + // CHECK-DAG: [[OMP_PREV_LB:%.+]] = load {{.+}}, {{.+}} [[OMP_LB]], + // CHECK-64-DAG: [[OMP_PREV_LB_EXT:%.+]] = zext {{.+}} [[OMP_PREV_LB]] to {{.+}} + // CHECK-DAG: [[OMP_PREV_UB:%.+]] = load {{.+}}, {{.+}} [[OMP_UB]], + // CHECK-64-DAG: [[OMP_PREV_UB_EXT:%.+]] = zext {{.+}} [[OMP_PREV_UB]] to {{.+}} + // check that distlb and distub are properly passed to fork_call + // CHECK-64: call{{.+}} 
@__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_3:@.+]] to {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_EXT]], i{{[0-9]+}} [[OMP_PREV_UB_EXT]], {{.+}}) + // CHECK-32: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_3:@.+]] to {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB]], i{{[0-9]+}} [[OMP_PREV_UB]], {{.+}}) + // CHECK: br label %[[DIST_INNER_LOOP_INC:.+]] + + // check DistInc + // CHECK: [[DIST_INNER_LOOP_INC]]: + // CHECK-DAG: [[OMP_IV_VAL_3:%.+]] = load {{.+}}, {{.+}}* [[OMP_IV]], + // CHECK-DAG: [[OMP_ST_VAL_1:%.+]] = load {{.+}}, {{.+}}* [[OMP_ST]], + // CHECK: [[OMP_IV_INC:%.+]] = add{{.+}} [[OMP_IV_VAL_3]], [[OMP_ST_VAL_1]] + // CHECK: store{{.+}} [[OMP_IV_INC]], {{.+}}* [[OMP_IV]], + // CHECK: br label %[[DIST_INNER_LOOP_HEADER]] + + // CHECK: [[DIST_INNER_LOOP_END]]: + // CHECK: br label %[[DIST_OUTER_LOOP_INC:.+]] + + // CHECK: [[DIST_OUTER_LOOP_INC]]: + // check NextLB and NextUB + // CHECK-DAG: [[OMP_LB_VAL_2:%.+]] = load{{.+}}, {{.+}} [[OMP_LB]], + // CHECK-DAG: [[OMP_ST_VAL_2:%.+]] = load{{.+}}, {{.+}} [[OMP_ST]], + // CHECK-DAG: [[OMP_LB_NEXT:%.+]] = add{{.+}} [[OMP_LB_VAL_2]], [[OMP_ST_VAL_2]] + // CHECK: store{{.+}} [[OMP_LB_NEXT]], {{.+}}* [[OMP_LB]], + // CHECK-DAG: [[OMP_UB_VAL_5:%.+]] = load{{.+}}, {{.+}} [[OMP_UB]], + // CHECK-DAG: [[OMP_ST_VAL_3:%.+]] = load{{.+}}, {{.+}} [[OMP_ST]], + // CHECK-DAG: [[OMP_UB_NEXT:%.+]] = add{{.+}} [[OMP_UB_VAL_5]], [[OMP_ST_VAL_3]] + // CHECK: store{{.+}} [[OMP_UB_NEXT]], {{.+}}* [[OMP_UB]], + // CHECK: br label %[[DIST_OUTER_LOOP_HEADER]] + + // outer loop exit + // CHECK: [[DIST_OUTER_LOOP_END]]: + // CHECK-DAG: call void @__kmpc_for_static_fini( + // CHECK: ret + + // skip implementation of 'parallel for': using default scheduling and was tested above + } + + // schedule: static no chunk + #pragma omp target + #pragma omp teams + // CHECK: define{{.+}} void [[OFFLOADING_FUN_4]]( + // CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 4, {{.+}}* [[OMP_OUTLINED_4:@.+]] to {{.+}}) 
+ + #pragma omp distribute parallel for schedule(static) + for (int i = 0; i < n; ++i) { + a[i] = b[i] + c[i]; + // CHECK: define{{.+}} void [[OMP_OUTLINED_4]]( + // CHECK-DAG: [[OMP_IV:%.omp.iv]] = alloca + // CHECK-DAG: [[OMP_LB:%.omp.comb.lb]] = alloca + // CHECK-DAG: [[OMP_UB:%.omp.comb.ub]] = alloca + // CHECK-DAG: [[OMP_ST:%.omp.stride]] = alloca + + // CHECK: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, i32 92, + // CHECK: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_4:@.+]] to {{.+}}, + // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case + // CHECK: ret + + // 'parallel for' implementation is the same as the case without schedule clase (static no chunk is the default) + // CHECK: define{{.+}} void [[OMP_PARFOR_OUTLINED_4]]({{.+}}, {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_IN:%.+]], i{{[0-9]+}} [[OMP_PREV_UB_IN:%.+]], {{.+}}, {{.+}}, {{.+}}, {{.+}}) + + // CHECK-DAG: [[OMP_PF_LB:%.omp.lb]] = alloca{{.+}}, + // CHECK-DAG: [[OMP_PF_UB:%.omp.ub]] = alloca{{.+}}, + // CHECK-DAG: [[OMP_PF_IV:%.omp.iv]] = alloca{{.+}}, + + // initialize lb and ub to PrevLB and PrevUB + // CHECK-DAG: store{{.+}} [[OMP_PREV_LB_IN]], {{.+}}* [[PREV_LB_ADDR:%.+]], + // CHECK-DAG: store{{.+}} [[OMP_PREV_UB_IN]], {{.+}}* [[PREV_UB_ADDR:%.+]], + // CHECK-DAG: [[OMP_PREV_LB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_LB_ADDR]], + // CHECK-64-DAG: [[OMP_PREV_LB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_LB_VAL]] to {{.+}} + // CHECK-DAG: [[OMP_PREV_UB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], + // CHECK-64-DAG: [[OMP_PREV_UB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_UB_VAL]] to {{.+}} + // CHECK-64-DAG: store{{.+}} [[OMP_PREV_LB_TRC]], {{.+}}* [[OMP_PF_LB]], + // CHECK-32-DAG: store{{.+}} [[OMP_PREV_LB_VAL]], {{.+}}* [[OMP_PF_LB]], + // CHECK-64-DAG: store{{.+}} [[OMP_PREV_UB_TRC]], {{.+}}* [[OMP_PF_UB]], + // CHECK-32-DAG: store{{.+}} [[OMP_PREV_UB_VAL]], {{.+}}* [[OMP_PF_UB]], + // CHECK: call void 
@__kmpc_for_static_init_4({{.+}}, {{.+}}, {{.+}} 34, {{.+}}, {{.+}}* [[OMP_PF_LB]], {{.+}}* [[OMP_PF_UB]],{{.+}}) + + // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used + // In this case we use EUB + // CHECK-DAG: [[OMP_PF_UB_VAL_1:%.+]] = load{{.+}} [[OMP_PF_UB]], + // CHECK: [[PF_NUM_IT_1:%.+]] = load{{.+}}, + // CHECK-DAG: [[PF_CMP_UB_NUM_IT:%.+]] = icmp{{.+}} [[OMP_PF_UB_VAL_1]], [[PF_NUM_IT_1]] + // CHECK: br i1 [[PF_CMP_UB_NUM_IT]], label %[[PF_EUB_TRUE:.+]], label %[[PF_EUB_FALSE:.+]] + // CHECK: [[PF_EUB_TRUE]]: + // CHECK: [[PF_NUM_IT_2:%.+]] = load{{.+}}, + // CHECK: br label %[[PF_EUB_END:.+]] + // CHECK-DAG: [[PF_EUB_FALSE]]: + // CHECK: [[OMP_PF_UB_VAL2:%.+]] = load{{.+}} [[OMP_PF_UB]], + // CHECK: br label %[[PF_EUB_END]] + // CHECK-DAG: [[PF_EUB_END]]: + // CHECK-DAG: [[PF_EUB_RES:%.+]] = phi{{.+}} [ [[PF_NUM_IT_2]], %[[PF_EUB_TRUE]] ], [ [[OMP_PF_UB_VAL2]], %[[PF_EUB_FALSE]] ] + // CHECK: store{{.+}} [[PF_EUB_RES]],{{.+}} [[OMP_PF_UB]], + + // initialize omp.iv + // CHECK: [[OMP_PF_LB_VAL_1:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], + // CHECK: store {{.+}} [[OMP_PF_LB_VAL_1]], {{.+}}* [[OMP_PF_IV]], + // CHECK: br label %[[OMP_PF_JUMP_BACK:.+]] + + // check exit condition + // CHECK: [[OMP_PF_JUMP_BACK]]: + // CHECK-DAG: [[OMP_PF_IV_VAL_1:%.+]] = load {{.+}} [[OMP_PF_IV]], + // CHECK-DAG: [[OMP_PF_UB_VAL_3:%.+]] = load {{.+}} [[OMP_PF_UB]], + // CHECK: [[PF_CMP_IV_UB:%.+]] = icmp sle {{.+}} [[OMP_PF_IV_VAL_1]], [[OMP_PF_UB_VAL_3]] + // CHECK: br {{.+}} [[PF_CMP_IV_UB]], label %[[PF_BODY:.+]], label %[[PF_END:.+]] + + // check that PrevLB and PrevUB are passed to the 'for' + // CHECK: [[PF_BODY]]: + // CHECK-DAG: {{.+}} = load{{.+}}, {{.+}}* [[OMP_PF_IV]], + // CHECK: br label {{.+}} + + // check stride 1 for 'for' in 'distribute parallel for' + // CHECK-DAG: [[OMP_PF_IV_VAL_2:%.+]] = load {{.+}}, {{.+}}* [[OMP_PF_IV]], + // CHECK: [[OMP_PF_IV_INC:%.+]] = add{{.+}} [[OMP_PF_IV_VAL_2]], 1 + // CHECK: store{{.+}} 
[[OMP_PF_IV_INC]], {{.+}}* [[OMP_PF_IV]], + // CHECK: br label %[[OMP_PF_JUMP_BACK]] + + // CHECK-DAG: call void @__kmpc_for_static_fini( + // CHECK: ret + } + + // schedule: static chunk + #pragma omp target + #pragma omp teams + // CHECK: define{{.+}} void [[OFFLOADING_FUN_5]]( + // CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 5, {{.+}}* [[OMP_OUTLINED_5:@.+]] to {{.+}}) + + #pragma omp distribute parallel for schedule(static, ch) + for (int i = 0; i < n; ++i) { + a[i] = b[i] + c[i]; + // CHECK: define{{.+}} void [[OMP_OUTLINED_5]]( + // CHECK: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, i32 92, + // CHECK: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_5:@.+]] to {{.+}}, + // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case + // CHECK: ret + + // 'parallel for' implementation using outer and inner loops and PrevEUB + // CHECK: define{{.+}} void [[OMP_PARFOR_OUTLINED_5]]({{.+}}, {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_IN:%.+]], i{{[0-9]+}} [[OMP_PREV_UB_IN:%.+]], {{.+}}, {{.+}}, {{.+}}, {{.+}}, {{.+}}) + // CHECK-DAG: [[OMP_PF_LB:%.omp.lb]] = alloca{{.+}}, + // CHECK-DAG: [[OMP_PF_UB:%.omp.ub]] = alloca{{.+}}, + // CHECK-DAG: [[OMP_PF_IV:%.omp.iv]] = alloca{{.+}}, + // CHECK-DAG: [[OMP_PF_ST:%.omp.stride]] = alloca{{.+}}, + + // initialize lb and ub to PrevLB and PrevUB + // CHECK-DAG: store{{.+}} [[OMP_PREV_LB_IN]], {{.+}}* [[PREV_LB_ADDR:%.+]], + // CHECK-DAG: store{{.+}} [[OMP_PREV_UB_IN]], {{.+}}* [[PREV_UB_ADDR:%.+]], + // CHECK-DAG: [[OMP_PREV_LB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_LB_ADDR]], + // CHECK-64-DAG: [[OMP_PREV_LB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_LB_VAL]] to {{.+}} + // CHECK-DAG: [[OMP_PREV_UB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], + // CHECK-64-DAG: [[OMP_PREV_UB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_UB_VAL]] to {{.+}} + // CHECK-64-DAG: store{{.+}} [[OMP_PREV_LB_TRC]], {{.+}}* [[OMP_PF_LB]], + // CHECK-32-DAG: store{{.+}} 
[[OMP_PREV_LB_VAL]], {{.+}}* [[OMP_PF_LB]], + // CHECK-64-DAG: store{{.+}} [[OMP_PREV_UB_TRC]], {{.+}}* [[OMP_PF_UB]], + // CHECK-32-DAG: store{{.+}} [[OMP_PREV_UB_VAL]], {{.+}}* [[OMP_PF_UB]], + // CHECK: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, {{.+}} 33, {{.+}}, {{.+}}* [[OMP_PF_LB]], {{.+}}* [[OMP_PF_UB]],{{.+}}) + // CHECK: br label %[[OMP_PF_OUTER_LOOP_HEADER:.+]] + + // check PrevEUB (using PrevUB instead of NumIt as upper bound) + // CHECK: [[OMP_PF_OUTER_LOOP_HEADER]]: + // CHECK-DAG: [[OMP_PF_UB_VAL_1:%.+]] = load{{.+}} [[OMP_PF_UB]], + // CHECK-64-DAG: [[OMP_PF_UB_VAL_CONV:%.+]] = sext{{.+}} [[OMP_PF_UB_VAL_1]] to + // CHECK: [[PF_PREV_UB_VAL_1:%.+]] = load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], + // CHECK-64-DAG: [[PF_CMP_UB_NUM_IT:%.+]] = icmp{{.+}} [[OMP_PF_UB_VAL_CONV]], [[PF_PREV_UB_VAL_1]] + // CHECK-32-DAG: [[PF_CMP_UB_NUM_IT:%.+]] = icmp{{.+}} [[OMP_PF_UB_VAL_1]], [[PF_PREV_UB_VAL_1]] + // CHECK: br i1 [[PF_CMP_UB_NUM_IT]], label %[[PF_EUB_TRUE:.+]], label %[[PF_EUB_FALSE:.+]] + // CHECK: [[PF_EUB_TRUE]]: + // CHECK: [[PF_PREV_UB_VAL_2:%.+]] = load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], + // CHECK: br label %[[PF_EUB_END:.+]] + // CHECK-DAG: [[PF_EUB_FALSE]]: + // CHECK: [[OMP_PF_UB_VAL_2:%.+]] = load{{.+}} [[OMP_PF_UB]], + // CHECK-64: [[OMP_PF_UB_VAL_2_CONV:%.+]] = sext{{.+}} [[OMP_PF_UB_VAL_2]] to + // CHECK: br label %[[PF_EUB_END]] + // CHECK-DAG: [[PF_EUB_END]]: + // CHECK-64-DAG: [[PF_EUB_RES:%.+]] = phi{{.+}} [ [[PF_PREV_UB_VAL_2]], %[[PF_EUB_TRUE]] ], [ [[OMP_PF_UB_VAL_2_CONV]], %[[PF_EUB_FALSE]] ] + // CHECK-32-DAG: [[PF_EUB_RES:%.+]] = phi{{.+}} [ [[PF_PREV_UB_VAL_2]], %[[PF_EUB_TRUE]] ], [ [[OMP_PF_UB_VAL_2]], %[[PF_EUB_FALSE]] ] + // CHECK-64-DAG: [[PF_EUB_RES_CONV:%.+]] = trunc{{.+}} [[PF_EUB_RES]] to + // CHECK-64: store{{.+}} [[PF_EUB_RES_CONV]],{{.+}} [[OMP_PF_UB]], + // CHECK-32: store{{.+}} [[PF_EUB_RES]], {{.+}} [[OMP_PF_UB]], + + // initialize omp.iv (IV = LB) + // CHECK: [[OMP_PF_LB_VAL_1:%.+]] = load{{.+}}, {{.+}} 
[[OMP_PF_LB]], + // CHECK: store {{.+}} [[OMP_PF_LB_VAL_1]], {{.+}}* [[OMP_PF_IV]], + + // outer loop: while (IV < UB) { + // CHECK-DAG: [[OMP_PF_IV_VAL_1:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_IV]], + // CHECK-DAG: [[OMP_PF_UB_VAL_3:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_UB]], + // CHECK: [[PF_CMP_IV_UB_1:%.+]] = icmp{{.+}} [[OMP_PF_IV_VAL_1]], [[OMP_PF_UB_VAL_3]] + // CHECK: br{{.+}} [[PF_CMP_IV_UB_1]], label %[[OMP_PF_OUTER_LOOP_BODY:.+]], label %[[OMP_PF_OUTER_LOOP_END:.+]] + + // CHECK: [[OMP_PF_OUTER_LOOP_BODY]]: + // CHECK: br label %[[OMP_PF_INNER_FOR_HEADER:.+]] + + // CHECK: [[OMP_PF_INNER_FOR_HEADER]]: + // CHECK-DAG: [[OMP_PF_IV_VAL_2:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_IV]], + // CHECK-DAG: [[OMP_PF_UB_VAL_4:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_UB]], + // CHECK: [[PF_CMP_IV_UB_2:%.+]] = icmp{{.+}} [[OMP_PF_IV_VAL_2]], [[OMP_PF_UB_VAL_4]] + // CHECK: br{{.+}} [[PF_CMP_IV_UB_2]], label %[[OMP_PF_INNER_LOOP_BODY:.+]], label %[[OMP_PF_INNER_LOOP_END:.+]] + + // CHECK: [[OMP_PF_INNER_LOOP_BODY]]: + // CHECK-DAG: {{.+}} = load{{.+}}, {{.+}}* [[OMP_PF_IV]], + // skip body branch + // CHECK: br{{.+}} + // CHECK: br label %[[OMP_PF_INNER_LOOP_INC:.+]] + + // IV = IV + 1 and inner loop latch + // CHECK: [[OMP_PF_INNER_LOOP_INC]]: + // CHECK-DAG: [[OMP_PF_IV_VAL_3:%.+]] = load{{.+}}, {{.+}}* [[OMP_IV]], + // CHECK-DAG: [[OMP_PF_NEXT_IV:%.+]] = add{{.+}} [[OMP_PF_IV_VAL_3]], 1 + // CHECK-DAG: store{{.+}} [[OMP_PF_NEXT_IV]], {{.+}}* [[OMP_IV]], + // CHECK: br label %[[OMP_PF_INNER_FOR_HEADER]] + + // check NextLB and NextUB + // CHECK: [[OMP_PF_INNER_LOOP_END]]: + // CHECK: br label %[[OMP_PF_OUTER_LOOP_INC:.+]] + + // CHECK: [[OMP_PF_OUTER_LOOP_INC]]: + // CHECK-DAG: [[OMP_PF_LB_VAL_2:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], + // CHECK-DAG: [[OMP_PF_ST_VAL_1:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_ST]], + // CHECK-DAG: [[OMP_PF_LB_NEXT:%.+]] = add{{.+}} [[OMP_PF_LB_VAL_2]], [[OMP_PF_ST_VAL_1]] + // CHECK: store{{.+}} [[OMP_PF_LB_NEXT]], {{.+}}* [[OMP_PF_LB]], + // 
CHECK-DAG: [[OMP_PF_UB_VAL_5:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_UB]], + // CHECK-DAG: [[OMP_PF_ST_VAL_2:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_ST]], + // CHECK-DAG: [[OMP_PF_UB_NEXT:%.+]] = add{{.+}} [[OMP_PF_UB_VAL_5]], [[OMP_PF_ST_VAL_2]] + // CHECK: store{{.+}} [[OMP_PF_UB_NEXT]], {{.+}}* [[OMP_PF_UB]], + // CHECK: br label %[[OMP_PF_OUTER_LOOP_HEADER]] + + // CHECK: [[OMP_PF_OUTER_LOOP_END]]: + // CHECK-DAG: call void @__kmpc_for_static_fini( + // CHECK: ret + } + + // schedule: dynamic no chunk + #pragma omp target + #pragma omp teams + // CHECK: define{{.+}} void [[OFFLOADING_FUN_6]]( + // CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 4, {{.+}}* [[OMP_OUTLINED_6:@.+]] to {{.+}}) + + #pragma omp distribute parallel for schedule(dynamic) + for (int i = 0; i < n; ++i) { + a[i] = b[i] + c[i]; + // CHECK: define{{.+}} void [[OMP_OUTLINED_6]]( + // CHECK: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, i32 92, + // CHECK: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_6:@.+]] to {{.+}}, + // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case + // CHECK: ret + + // 'parallel for' implementation using outer and inner loops and PrevEUB + // CHECK: define{{.+}} void [[OMP_PARFOR_OUTLINED_6]]({{.+}}, {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_IN:%.+]], i{{[0-9]+}} [[OMP_PREV_UB_IN:%.+]], {{.+}}, {{.+}}, {{.+}}, {{.+}}) + // CHECK-DAG: [[OMP_PF_LB:%.omp.lb]] = alloca{{.+}}, + // CHECK-DAG: [[OMP_PF_UB:%.omp.ub]] = alloca{{.+}}, + // CHECK-DAG: [[OMP_PF_IV:%.omp.iv]] = alloca{{.+}}, + // CHECK-DAG: [[OMP_PF_ST:%.omp.stride]] = alloca{{.+}}, + + // initialize lb and ub to PrevLB and PrevUB + // CHECK-DAG: store{{.+}} [[OMP_PREV_LB_IN]], {{.+}}* [[PREV_LB_ADDR:%.+]], + // CHECK-DAG: store{{.+}} [[OMP_PREV_UB_IN]], {{.+}}* [[PREV_UB_ADDR:%.+]], + // CHECK-DAG: [[OMP_PREV_LB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_LB_ADDR]], + // CHECK-64-DAG: [[OMP_PREV_LB_TRC:%.+]] = trunc{{.+}} 
[[OMP_PREV_LB_VAL]] to {{.+}} + // CHECK-DAG: [[OMP_PREV_UB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], + // CHECK-64-DAG: [[OMP_PREV_UB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_UB_VAL]] to {{.+}} + // CHECK-64-DAG: store{{.+}} [[OMP_PREV_LB_TRC]], {{.+}}* [[OMP_PF_LB]], + // CHECK-64-DAG: store{{.+}} [[OMP_PREV_UB_TRC]], {{.+}}* [[OMP_PF_UB]], + // CHECK-32-DAG: store{{.+}} [[OMP_PREV_LB_VAL]], {{.+}}* [[OMP_PF_LB]], + // CHECK-32-DAG: store{{.+}} [[OMP_PREV_UB_VAL]], {{.+}}* [[OMP_PF_UB]], + // CHECK-DAG: [[OMP_PF_LB_VAL:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], + // CHECK-DAG: [[OMP_PF_UB_VAL:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_UB]], + // CHECK: call void @__kmpc_dispatch_init_4({{.+}}, {{.+}}, {{.+}} 35, {{.+}} [[OMP_PF_LB_VAL]], {{.+}} [[OMP_PF_UB_VAL]], {{.+}}, {{.+}}) + // CHECK: br label %[[OMP_PF_OUTER_LOOP_HEADER:.+]] + + // CHECK: [[OMP_PF_OUTER_LOOP_HEADER]]: + // CHECK: [[IS_FIN:%.+]] = call{{.+}} @__kmpc_dispatch_next_4({{.+}}, {{.+}}, {{.+}}, {{.+}}* [[OMP_PF_LB]], {{.+}}* [[OMP_PF_UB]], {{.+}}* [[OMP_PF_ST]]) + // CHECK: [[IS_FIN_CMP:%.+]] = icmp{{.+}} [[IS_FIN]], 0 + // CHECK: br{{.+}} [[IS_FIN_CMP]], label %[[OMP_PF_OUTER_LOOP_BODY:.+]], label %[[OMP_PF_OUTER_LOOP_END:.+]] + + // initialize omp.iv (IV = LB) + // CHECK: [[OMP_PF_OUTER_LOOP_BODY]]: + // CHECK-DAG: [[OMP_PF_LB_VAL_1:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], + // CHECK-DAG: store {{.+}} [[OMP_PF_LB_VAL_1]], {{.+}}* [[OMP_PF_IV]], + // CHECK: br label %[[OMP_PF_INNER_LOOP_HEADER:.+]] + + // CHECK: [[OMP_PF_INNER_LOOP_HEADER]]: + // CHECK-DAG: [[OMP_PF_IV_VAL_2:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_IV]], + // CHECK-DAG: [[OMP_PF_UB_VAL_4:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_UB]], + // CHECK: [[PF_CMP_IV_UB_2:%.+]] = icmp{{.+}} [[OMP_PF_IV_VAL_2]], [[OMP_PF_UB_VAL_4]] + // CHECK: br{{.+}} [[PF_CMP_IV_UB_2]], label %[[OMP_PF_INNER_LOOP_BODY:.+]], label %[[OMP_PF_INNER_LOOP_END:.+]] + + // CHECK: [[OMP_PF_INNER_LOOP_BODY]]: + // CHECK-DAG: {{.+}} = load{{.+}}, {{.+}}* [[OMP_PF_IV]], + // 
skip body branch + // CHECK: br{{.+}} + // CHECK: br label %[[OMP_PF_INNER_LOOP_INC:.+]] + + // IV = IV + 1 and inner loop latch + // CHECK: [[OMP_PF_INNER_LOOP_INC]]: + // CHECK-DAG: [[OMP_PF_IV_VAL_3:%.+]] = load{{.+}}, {{.+}}* [[OMP_IV]], + // CHECK-DAG: [[OMP_PF_NEXT_IV:%.+]] = add{{.+}} [[OMP_PF_IV_VAL_3]], 1 + // CHECK-DAG: store{{.+}} [[OMP_PF_NEXT_IV]], {{.+}}* [[OMP_IV]], + // CHECK: br label %[[OMP_PF_INNER_LOOP_HEADER]] + + // check NextLB and NextUB + // CHECK: [[OMP_PF_INNER_LOOP_END]]: + // CHECK: br label %[[OMP_PF_OUTER_LOOP_INC:.+]] + + // CHECK: [[OMP_PF_OUTER_LOOP_INC]]: + // CHECK: br label %[[OMP_PF_OUTER_LOOP_HEADER]] + + // CHECK: [[OMP_PF_OUTER_LOOP_END]]: + // CHECK: ret + } + + // schedule: dynamic chunk + #pragma omp target + #pragma omp teams + // CHECK: define{{.+}} void [[OFFLOADING_FUN_7]]( + // CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 5, {{.+}}* [[OMP_OUTLINED_7:@.+]] to {{.+}}) + + #pragma omp distribute parallel for schedule(dynamic, ch) + for (int i = 0; i < n; ++i) { + a[i] = b[i] + c[i]; + // CHECK: define{{.+}} void [[OMP_OUTLINED_7]]( + // CHECK: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, i32 92, + // CHECK: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_7:@.+]] to {{.+}}, + // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case + // CHECK: ret + + // 'parallel for' implementation using outer and inner loops and PrevEUB + // CHECK: define{{.+}} void [[OMP_PARFOR_OUTLINED_7]]({{.+}}, {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_IN:%.+]], i{{[0-9]+}} [[OMP_PREV_UB_IN:%.+]], {{.+}}, {{.+}}, {{.+}}, {{.+}}, {{.+}}) + // CHECK-DAG: [[OMP_PF_LB:%.omp.lb]] = alloca{{.+}}, + // CHECK-DAG: [[OMP_PF_UB:%.omp.ub]] = alloca{{.+}}, + // CHECK-DAG: [[OMP_PF_IV:%.omp.iv]] = alloca{{.+}}, + // CHECK-DAG: [[OMP_PF_ST:%.omp.stride]] = alloca{{.+}}, + + // initialize lb and ub to PrevLB and PrevUB + // CHECK-DAG: store{{.+}} [[OMP_PREV_LB_IN]], {{.+}}* 
[[PREV_LB_ADDR:%.+]], + // CHECK-DAG: store{{.+}} [[OMP_PREV_UB_IN]], {{.+}}* [[PREV_UB_ADDR:%.+]], + // CHECK-DAG: [[OMP_PREV_LB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_LB_ADDR]], + // CHECK-64-DAG: [[OMP_PREV_LB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_LB_VAL]] to {{.+}} + // CHECK-DAG: [[OMP_PREV_UB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], + // CHECK-64-DAG: [[OMP_PREV_UB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_UB_VAL]] to {{.+}} + // CHECK-64-DAG: store{{.+}} [[OMP_PREV_LB_TRC]], {{.+}}* [[OMP_PF_LB]], + // CHECK-64-DAG: store{{.+}} [[OMP_PREV_UB_TRC]], {{.+}}* [[OMP_PF_UB]], + // CHECK-32-DAG: store{{.+}} [[OMP_PREV_LB_VAL]], {{.+}}* [[OMP_PF_LB]], + // CHECK-32-DAG: store{{.+}} [[OMP_PREV_UB_VAL]], {{.+}}* [[OMP_PF_UB]], + // CHECK-DAG: [[OMP_PF_LB_VAL:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], + // CHECK-DAG: [[OMP_PF_UB_VAL:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_UB]], + // CHECK: call void @__kmpc_dispatch_init_4({{.+}}, {{.+}}, {{.+}} 35, {{.+}} [[OMP_PF_LB_VAL]], {{.+}} [[OMP_PF_UB_VAL]], {{.+}}, {{.+}}) + // CHECK: br label %[[OMP_PF_OUTER_LOOP_HEADER:.+]] + + // CHECK: [[OMP_PF_OUTER_LOOP_HEADER]]: + // CHECK: [[IS_FIN:%.+]] = call{{.+}} @__kmpc_dispatch_next_4({{.+}}, {{.+}}, {{.+}}, {{.+}}* [[OMP_PF_LB]], {{.+}}* [[OMP_PF_UB]], {{.+}}* [[OMP_PF_ST]]) + // CHECK: [[IS_FIN_CMP:%.+]] = icmp{{.+}} [[IS_FIN]], 0 + // CHECK: br{{.+}} [[IS_FIN_CMP]], label %[[OMP_PF_OUTER_LOOP_BODY:.+]], label %[[OMP_PF_OUTER_LOOP_END:.+]] + + // initialize omp.iv (IV = LB) + // CHECK: [[OMP_PF_OUTER_LOOP_BODY]]: + // CHECK-DAG: [[OMP_PF_LB_VAL_1:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], + // CHECK-DAG: store {{.+}} [[OMP_PF_LB_VAL_1]], {{.+}}* [[OMP_PF_IV]], + // CHECK: br label %[[OMP_PF_INNER_LOOP_HEADER:.+]] + + // CHECK: [[OMP_PF_INNER_LOOP_HEADER]]: + // CHECK-DAG: [[OMP_PF_IV_VAL_2:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_IV]], + // CHECK-DAG: [[OMP_PF_UB_VAL_4:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_UB]], + // CHECK: [[PF_CMP_IV_UB_2:%.+]] = icmp{{.+}} [[OMP_PF_IV_VAL_2]], 
[[OMP_PF_UB_VAL_4]] + // CHECK: br{{.+}} [[PF_CMP_IV_UB_2]], label %[[OMP_PF_INNER_LOOP_BODY:.+]], label %[[OMP_PF_INNER_LOOP_END:.+]] + + // CHECK: [[OMP_PF_INNER_LOOP_BODY]]: + // CHECK-DAG: {{.+}} = load{{.+}}, {{.+}}* [[OMP_PF_IV]], + // skip body branch + // CHECK: br{{.+}} + // CHECK: br label %[[OMP_PF_INNER_LOOP_INC:.+]] + + // IV = IV + 1 and inner loop latch + // CHECK: [[OMP_PF_INNER_LOOP_INC]]: + // CHECK-DAG: [[OMP_PF_IV_VAL_3:%.+]] = load{{.+}}, {{.+}}* [[OMP_IV]], + // CHECK-DAG: [[OMP_PF_NEXT_IV:%.+]] = add{{.+}} [[OMP_PF_IV_VAL_3]], 1 + // CHECK-DAG: store{{.+}} [[OMP_PF_NEXT_IV]], {{.+}}* [[OMP_IV]], + // CHECK: br label %[[OMP_PF_INNER_LOOP_HEADER]] + + // check NextLB and NextUB + // CHECK: [[OMP_PF_INNER_LOOP_END]]: + // CHECK: br label %[[OMP_PF_OUTER_LOOP_INC:.+]] + + // CHECK: [[OMP_PF_OUTER_LOOP_INC]]: + // CHECK: br label %[[OMP_PF_OUTER_LOOP_HEADER]] + + // CHECK: [[OMP_PF_OUTER_LOOP_END]]: + // CHECK: ret + } + + return tmain(); +#endif +} + +// check code +// CHECK: define{{.+}} [[TMAIN]]() + +// CHECK: call i{{[0-9]+}} @__tgt_target_teams( +// CHECK: call void [[OFFLOADING_FUN_1:@.+]]( + +// CHECK: call i{{[0-9]+}} @__tgt_target_teams( +// CHECK: call void [[OFFLOADING_FUN_2:@.+]]( + +// CHECK: call i{{[0-9]+}} @__tgt_target_teams( +// CHECK: call void [[OFFLOADING_FUN_3:@.+]]( + +// CHECK: call i{{[0-9]+}} @__tgt_target_teams( +// CHECK: call void [[OFFLOADING_FUN_4:@.+]]( + +// CHECK: call i{{[0-9]+}} @__tgt_target_teams( +// CHECK: call void [[OFFLOADING_FUN_5:@.+]]( + +// CHECK: call i{{[0-9]+}} @__tgt_target_teams( +// CHECK: call void [[OFFLOADING_FUN_6:@.+]]( + +// CHECK: call i{{[0-9]+}} @__tgt_target_teams( +// CHECK: call void [[OFFLOADING_FUN_7:@.+]]( + +// CHECK: define{{.+}} void [[OFFLOADING_FUN_1]]( +// CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 4, {{.+}}* [[OMP_OUTLINED_1:@.+]] to {{.+}}) + +// CHECK: define{{.+}} void [[OMP_OUTLINED_1]]( +// CHECK-DAG: [[OMP_IV:%.omp.iv]] = alloca +// CHECK-DAG: 
[[OMP_LB:%.omp.comb.lb]] = alloca +// CHECK-DAG: [[OMP_UB:%.omp.comb.ub]] = alloca +// CHECK-DAG: [[OMP_ST:%.omp.stride]] = alloca + +// CHECK: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, i32 92, + +// check EUB for distribute +// CHECK-DAG: [[OMP_UB_VAL_1:%.+]] = load{{.+}} [[OMP_UB]], +// CHECK: [[NUM_IT_1:%.+]] = load{{.+}}, +// CHECK-DAG: [[CMP_UB_NUM_IT:%.+]] = icmp sgt {{.+}} [[OMP_UB_VAL_1]], [[NUM_IT_1]] +// CHECK: br {{.+}} [[CMP_UB_NUM_IT]], label %[[EUB_TRUE:.+]], label %[[EUB_FALSE:.+]] +// CHECK-DAG: [[EUB_TRUE]]: +// CHECK: [[NUM_IT_2:%.+]] = load{{.+}}, +// CHECK: br label %[[EUB_END:.+]] +// CHECK-DAG: [[EUB_FALSE]]: +// CHECK: [[OMP_UB_VAL2:%.+]] = load{{.+}} [[OMP_UB]], +// CHECK: br label %[[EUB_END]] +// CHECK-DAG: [[EUB_END]]: +// CHECK-DAG: [[EUB_RES:%.+]] = phi{{.+}} [ [[NUM_IT_2]], %[[EUB_TRUE]] ], [ [[OMP_UB_VAL2]], %[[EUB_FALSE]] ] +// CHECK: store{{.+}} [[EUB_RES]], {{.+}}* [[OMP_UB]], + +// initialize omp.iv +// CHECK: [[OMP_LB_VAL_1:%.+]] = load{{.+}}, {{.+}}* [[OMP_LB]], +// CHECK: store {{.+}} [[OMP_LB_VAL_1]], {{.+}}* [[OMP_IV]], +// CHECK: br label %[[OMP_JUMP_BACK:.+]] + +// check exit condition +// CHECK: [[OMP_JUMP_BACK]]: +// CHECK-DAG: [[OMP_IV_VAL_1:%.+]] = load {{.+}} [[OMP_IV]], +// CHECK-DAG: [[OMP_UB_VAL_3:%.+]] = load {{.+}} [[OMP_UB]], +// CHECK: [[CMP_IV_UB:%.+]] = icmp sle {{.+}} [[OMP_IV_VAL_1]], [[OMP_UB_VAL_3]] +// CHECK: br {{.+}} [[CMP_IV_UB]], label %[[DIST_BODY:.+]], label %[[DIST_END:.+]] + +// check that PrevLB and PrevUB are passed to the 'for' +// CHECK: [[DIST_BODY]]: +// CHECK-DAG: [[OMP_PREV_LB:%.+]] = load {{.+}}, {{.+}} [[OMP_LB]], +// CHECK-64-DAG: [[OMP_PREV_LB_EXT:%.+]] = zext {{.+}} [[OMP_PREV_LB]] to {{.+}} +// CHECK-DAG: [[OMP_PREV_UB:%.+]] = load {{.+}}, {{.+}} [[OMP_UB]], +// CHECK-64-DAG: [[OMP_PREV_UB_EXT:%.+]] = zext {{.+}} [[OMP_PREV_UB]] to {{.+}} +// check that distlb and distub are properly passed to fork_call +// CHECK-64: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, 
{{.+}}[[OMP_PARFOR_OUTLINED_1:@.+]] to {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_EXT]], i{{[0-9]+}} [[OMP_PREV_UB_EXT]], {{.+}}) +// CHECK-32: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_1:@.+]] to {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB]], i{{[0-9]+}} [[OMP_PREV_UB]], {{.+}}) +// CHECK: br label %[[DIST_INC:.+]] + +// increment by stride (distInc - 'parallel for' executes the whole chunk) and latch +// CHECK: [[DIST_INC]]: +// CHECK-DAG: [[OMP_IV_VAL_2:%.+]] = load {{.+}}, {{.+}}* [[OMP_IV]], +// CHECK-DAG: [[OMP_ST_VAL_1:%.+]] = load {{.+}}, {{.+}}* [[OMP_ST]], +// CHECK: [[OMP_IV_INC:%.+]] = add{{.+}} [[OMP_IV_VAL_2]], [[OMP_ST_VAL_1]] +// CHECK: store{{.+}} [[OMP_IV_INC]], {{.+}}* [[OMP_IV]], +// CHECK: br label %[[OMP_JUMP_BACK]] + +// CHECK-DAG: call void @__kmpc_for_static_fini( +// CHECK: ret + +// implementation of 'parallel for' +// CHECK: define{{.+}} void [[OMP_PARFOR_OUTLINED_1]]({{.+}}, {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_IN:%.+]], i{{[0-9]+}} [[OMP_PREV_UB_IN:%.+]], {{.+}}, {{.+}}, {{.+}}, {{.+}}) + +// CHECK-DAG: [[OMP_PF_LB:%.omp.lb]] = alloca{{.+}}, +// CHECK-DAG: [[OMP_PF_UB:%.omp.ub]] = alloca{{.+}}, +// CHECK-DAG: [[OMP_PF_IV:%.omp.iv]] = alloca{{.+}}, + +// initialize lb and ub to PrevLB and PrevUB +// CHECK-DAG: store{{.+}} [[OMP_PREV_LB_IN]], {{.+}}* [[PREV_LB_ADDR:%.+]], +// CHECK-DAG: store{{.+}} [[OMP_PREV_UB_IN]], {{.+}}* [[PREV_UB_ADDR:%.+]], +// CHECK-DAG: [[OMP_PREV_LB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_LB_ADDR]], +// CHECK-64-DAG: [[OMP_PREV_LB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_LB_VAL]] to {{.+}} +// CHECK-DAG: [[OMP_PREV_UB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], +// CHECK-64-DAG: [[OMP_PREV_UB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_UB_VAL]] to {{.+}} +// CHECK-64-DAG: store{{.+}} [[OMP_PREV_LB_TRC]], {{.+}}* [[OMP_PF_LB]], +// CHECK-32-DAG: store{{.+}} [[OMP_PREV_LB_VAL]], {{.+}}* [[OMP_PF_LB]], +// CHECK-64-DAG: store{{.+}} [[OMP_PREV_UB_TRC]], {{.+}}* [[OMP_PF_UB]], +// CHECK-32-DAG: store{{.+}} 
[[OMP_PREV_UB_VAL]], {{.+}}* [[OMP_PF_UB]], +// CHECK: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, {{.+}} 34, {{.+}}, {{.+}}* [[OMP_PF_LB]], {{.+}}* [[OMP_PF_UB]],{{.+}}) + +// PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used +// In this case we use EUB +// CHECK-DAG: [[OMP_PF_UB_VAL_1:%.+]] = load{{.+}} [[OMP_PF_UB]], +// CHECK: [[PF_NUM_IT_1:%.+]] = load{{.+}}, +// CHECK-DAG: [[PF_CMP_UB_NUM_IT:%.+]] = icmp{{.+}} [[OMP_PF_UB_VAL_1]], [[PF_NUM_IT_1]] +// CHECK: br i1 [[PF_CMP_UB_NUM_IT]], label %[[PF_EUB_TRUE:.+]], label %[[PF_EUB_FALSE:.+]] +// CHECK: [[PF_EUB_TRUE]]: +// CHECK: [[PF_NUM_IT_2:%.+]] = load{{.+}}, +// CHECK: br label %[[PF_EUB_END:.+]] +// CHECK-DAG: [[PF_EUB_FALSE]]: +// CHECK: [[OMP_PF_UB_VAL2:%.+]] = load{{.+}} [[OMP_PF_UB]], +// CHECK: br label %[[PF_EUB_END]] +// CHECK-DAG: [[PF_EUB_END]]: +// CHECK-DAG: [[PF_EUB_RES:%.+]] = phi{{.+}} [ [[PF_NUM_IT_2]], %[[PF_EUB_TRUE]] ], [ [[OMP_PF_UB_VAL2]], %[[PF_EUB_FALSE]] ] +// CHECK: store{{.+}} [[PF_EUB_RES]],{{.+}} [[OMP_PF_UB]], + +// initialize omp.iv +// CHECK: [[OMP_PF_LB_VAL_1:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], +// CHECK: store {{.+}} [[OMP_PF_LB_VAL_1]], {{.+}}* [[OMP_PF_IV]], +// CHECK: br label %[[OMP_PF_JUMP_BACK:.+]] + +// check exit condition +// CHECK: [[OMP_PF_JUMP_BACK]]: +// CHECK-DAG: [[OMP_PF_IV_VAL_1:%.+]] = load {{.+}} [[OMP_PF_IV]], +// CHECK-DAG: [[OMP_PF_UB_VAL_3:%.+]] = load {{.+}} [[OMP_PF_UB]], +// CHECK: [[PF_CMP_IV_UB:%.+]] = icmp sle {{.+}} [[OMP_PF_IV_VAL_1]], [[OMP_PF_UB_VAL_3]] +// CHECK: br {{.+}} [[PF_CMP_IV_UB]], label %[[PF_BODY:.+]], label %[[PF_END:.+]] + +// check that PrevLB and PrevUB are passed to the 'for' +// CHECK: [[PF_BODY]]: +// CHECK-DAG: {{.+}} = load{{.+}}, {{.+}}* [[OMP_PF_IV]], +// CHECK: br label {{.+}} + +// check stride 1 for 'for' in 'distribute parallel for' +// CHECK-DAG: [[OMP_PF_IV_VAL_2:%.+]] = load {{.+}}, {{.+}}* [[OMP_PF_IV]], +// CHECK: [[OMP_PF_IV_INC:%.+]] = add{{.+}} [[OMP_PF_IV_VAL_2]], 1 
+// CHECK: store{{.+}} [[OMP_PF_IV_INC]], {{.+}}* [[OMP_PF_IV]], +// CHECK: br label %[[OMP_PF_JUMP_BACK]] + +// CHECK-DAG: call void @__kmpc_for_static_fini( +// CHECK: ret + +// CHECK: define{{.+}} void [[OFFLOADING_FUN_2]]( +// CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 4, {{.+}}* [[OMP_OUTLINED_2:@.+]] to {{.+}}) + +// CHECK: define{{.+}} void [[OMP_OUTLINED_2]]( +// CHECK-DAG: [[OMP_IV:%.omp.iv]] = alloca +// CHECK-DAG: [[OMP_LB:%.omp.comb.lb]] = alloca +// CHECK-DAG: [[OMP_UB:%.omp.comb.ub]] = alloca +// CHECK-DAG: [[OMP_ST:%.omp.stride]] = alloca + +// CHECK: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, i32 92, + +// check EUB for distribute +// CHECK-DAG: [[OMP_UB_VAL_1:%.+]] = load{{.+}} [[OMP_UB]], +// CHECK: [[NUM_IT_1:%.+]] = load{{.+}}, +// CHECK-DAG: [[CMP_UB_NUM_IT:%.+]] = icmp sgt {{.+}} [[OMP_UB_VAL_1]], [[NUM_IT_1]] +// CHECK: br {{.+}} [[CMP_UB_NUM_IT]], label %[[EUB_TRUE:.+]], label %[[EUB_FALSE:.+]] +// CHECK-DAG: [[EUB_TRUE]]: +// CHECK: [[NUM_IT_2:%.+]] = load{{.+}}, +// CHECK: br label %[[EUB_END:.+]] +// CHECK-DAG: [[EUB_FALSE]]: +// CHECK: [[OMP_UB_VAL2:%.+]] = load{{.+}} [[OMP_UB]], +// CHECK: br label %[[EUB_END]] +// CHECK-DAG: [[EUB_END]]: +// CHECK-DAG: [[EUB_RES:%.+]] = phi{{.+}} [ [[NUM_IT_2]], %[[EUB_TRUE]] ], [ [[OMP_UB_VAL2]], %[[EUB_FALSE]] ] +// CHECK: store{{.+}} [[EUB_RES]], {{.+}}* [[OMP_UB]], + +// initialize omp.iv +// CHECK: [[OMP_LB_VAL_1:%.+]] = load{{.+}}, {{.+}}* [[OMP_LB]], +// CHECK: store {{.+}} [[OMP_LB_VAL_1]], {{.+}}* [[OMP_IV]], +// CHECK: br label %[[OMP_JUMP_BACK:.+]] + +// check exit condition +// CHECK: [[OMP_JUMP_BACK]]: +// CHECK-DAG: [[OMP_IV_VAL_1:%.+]] = load {{.+}} [[OMP_IV]], +// CHECK-DAG: [[OMP_UB_VAL_3:%.+]] = load {{.+}} [[OMP_UB]], +// CHECK: [[CMP_IV_UB:%.+]] = icmp sle {{.+}} [[OMP_IV_VAL_1]], [[OMP_UB_VAL_3]] +// CHECK: br {{.+}} [[CMP_IV_UB]], label %[[DIST_BODY:.+]], label %[[DIST_END:.+]] + +// check that PrevLB and PrevUB are passed to the 'for' +// CHECK: 
[[DIST_BODY]]: +// CHECK-DAG: [[OMP_PREV_LB:%.+]] = load {{.+}}, {{.+}} [[OMP_LB]], +// CHECK-64-DAG: [[OMP_PREV_LB_EXT:%.+]] = zext {{.+}} [[OMP_PREV_LB]] to {{.+}} +// CHECK-DAG: [[OMP_PREV_UB:%.+]] = load {{.+}}, {{.+}} [[OMP_UB]], +// CHECK-64-DAG: [[OMP_PREV_UB_EXT:%.+]] = zext {{.+}} [[OMP_PREV_UB]] to {{.+}} +// check that distlb and distub are properly passed to fork_call +// CHECK-64: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_2:@.+]] to {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_EXT]], i{{[0-9]+}} [[OMP_PREV_UB_EXT]], {{.+}}) +// CHECK-32: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_2:@.+]] to {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB]], i{{[0-9]+}} [[OMP_PREV_UB]], {{.+}}) +// CHECK: br label %[[DIST_INC:.+]] + +// increment by stride (distInc - 'parallel for' executes the whole chunk) and latch +// CHECK: [[DIST_INC]]: +// CHECK-DAG: [[OMP_IV_VAL_2:%.+]] = load {{.+}}, {{.+}}* [[OMP_IV]], +// CHECK-DAG: [[OMP_ST_VAL_1:%.+]] = load {{.+}}, {{.+}}* [[OMP_ST]], +// CHECK: [[OMP_IV_INC:%.+]] = add{{.+}} [[OMP_IV_VAL_2]], [[OMP_ST_VAL_1]] +// CHECK: store{{.+}} [[OMP_IV_INC]], {{.+}}* [[OMP_IV]], +// CHECK: br label %[[OMP_JUMP_BACK]] + +// CHECK-DAG: call void @__kmpc_for_static_fini( +// CHECK: ret + +// implementation of 'parallel for' +// CHECK: define{{.+}} void [[OMP_PARFOR_OUTLINED_2]]({{.+}}, {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_IN:%.+]], i{{[0-9]+}} [[OMP_PREV_UB_IN:%.+]], {{.+}}, {{.+}}, {{.+}}, {{.+}}) + +// CHECK-DAG: [[OMP_PF_LB:%.omp.lb]] = alloca{{.+}}, +// CHECK-DAG: [[OMP_PF_UB:%.omp.ub]] = alloca{{.+}}, +// CHECK-DAG: [[OMP_PF_IV:%.omp.iv]] = alloca{{.+}}, + +// initialize lb and ub to PrevLB and PrevUB +// CHECK-DAG: store{{.+}} [[OMP_PREV_LB_IN]], {{.+}}* [[PREV_LB_ADDR:%.+]], +// CHECK-DAG: store{{.+}} [[OMP_PREV_UB_IN]], {{.+}}* [[PREV_UB_ADDR:%.+]], +// CHECK-DAG: [[OMP_PREV_LB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_LB_ADDR]], +// CHECK-64-DAG: [[OMP_PREV_LB_TRC:%.+]] = trunc{{.+}} 
[[OMP_PREV_LB_VAL]] to {{.+}} +// CHECK-DAG: [[OMP_PREV_UB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], +// CHECK-64-DAG: [[OMP_PREV_UB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_UB_VAL]] to {{.+}} +// CHECK-64-DAG: store{{.+}} [[OMP_PREV_LB_TRC]], {{.+}}* [[OMP_PF_LB]], +// CHECK-32-DAG: store{{.+}} [[OMP_PREV_LB_VAL]], {{.+}}* [[OMP_PF_LB]], +// CHECK-64-DAG: store{{.+}} [[OMP_PREV_UB_TRC]], {{.+}}* [[OMP_PF_UB]], +// CHECK-32-DAG: store{{.+}} [[OMP_PREV_UB_VAL]], {{.+}}* [[OMP_PF_UB]], +// CHECK: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, {{.+}} 34, {{.+}}, {{.+}}* [[OMP_PF_LB]], {{.+}}* [[OMP_PF_UB]],{{.+}}) + +// PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used +// In this case we use EUB +// CHECK-DAG: [[OMP_PF_UB_VAL_1:%.+]] = load{{.+}} [[OMP_PF_UB]], +// CHECK: [[PF_NUM_IT_1:%.+]] = load{{.+}}, +// CHECK-DAG: [[PF_CMP_UB_NUM_IT:%.+]] = icmp{{.+}} [[OMP_PF_UB_VAL_1]], [[PF_NUM_IT_1]] +// CHECK: br i1 [[PF_CMP_UB_NUM_IT]], label %[[PF_EUB_TRUE:.+]], label %[[PF_EUB_FALSE:.+]] +// CHECK: [[PF_EUB_TRUE]]: +// CHECK: [[PF_NUM_IT_2:%.+]] = load{{.+}}, +// CHECK: br label %[[PF_EUB_END:.+]] +// CHECK-DAG: [[PF_EUB_FALSE]]: +// CHECK: [[OMP_PF_UB_VAL2:%.+]] = load{{.+}} [[OMP_PF_UB]], +// CHECK: br label %[[PF_EUB_END]] +// CHECK-DAG: [[PF_EUB_END]]: +// CHECK-DAG: [[PF_EUB_RES:%.+]] = phi{{.+}} [ [[PF_NUM_IT_2]], %[[PF_EUB_TRUE]] ], [ [[OMP_PF_UB_VAL2]], %[[PF_EUB_FALSE]] ] +// CHECK: store{{.+}} [[PF_EUB_RES]],{{.+}} [[OMP_PF_UB]], + +// initialize omp.iv +// CHECK: [[OMP_PF_LB_VAL_1:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], +// CHECK: store {{.+}} [[OMP_PF_LB_VAL_1]], {{.+}}* [[OMP_PF_IV]], +// CHECK: br label %[[OMP_PF_JUMP_BACK:.+]] + +// check exit condition +// CHECK: [[OMP_PF_JUMP_BACK]]: +// CHECK-DAG: [[OMP_PF_IV_VAL_1:%.+]] = load {{.+}} [[OMP_PF_IV]], +// CHECK-DAG: [[OMP_PF_UB_VAL_3:%.+]] = load {{.+}} [[OMP_PF_UB]], +// CHECK: [[PF_CMP_IV_UB:%.+]] = icmp sle {{.+}} [[OMP_PF_IV_VAL_1]], [[OMP_PF_UB_VAL_3]] +// CHECK: 
br {{.+}} [[PF_CMP_IV_UB]], label %[[PF_BODY:.+]], label %[[PF_END:.+]] + +// check that PrevLB and PrevUB are passed to the 'for' +// CHECK: [[PF_BODY]]: +// CHECK-DAG: {{.+}} = load{{.+}}, {{.+}}* [[OMP_PF_IV]], +// CHECK: br label {{.+}} + +// check stride 1 for 'for' in 'distribute parallel for' +// CHECK-DAG: [[OMP_PF_IV_VAL_2:%.+]] = load {{.+}}, {{.+}}* [[OMP_PF_IV]], +// CHECK: [[OMP_PF_IV_INC:%.+]] = add{{.+}} [[OMP_PF_IV_VAL_2]], 1 +// CHECK: store{{.+}} [[OMP_PF_IV_INC]], {{.+}}* [[OMP_PF_IV]], +// CHECK: br label %[[OMP_PF_JUMP_BACK]] + +// CHECK-DAG: call void @__kmpc_for_static_fini( +// CHECK: ret + +// CHECK: define{{.+}} void [[OFFLOADING_FUN_3]]( +// CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 5, {{.+}}* [[OMP_OUTLINED_3:@.+]] to {{.+}}) + +// CHECK: define{{.+}} void [[OMP_OUTLINED_3]]( +// CHECK-DAG: [[OMP_IV:%.omp.iv]] = alloca +// CHECK-DAG: [[OMP_LB:%.omp.comb.lb]] = alloca +// CHECK-DAG: [[OMP_UB:%.omp.comb.ub]] = alloca +// CHECK-DAG: [[OMP_ST:%.omp.stride]] = alloca + +// unlike the previous tests, in this one we have an outer and an inner loop for 'distribute' +// CHECK: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, i32 91, +// CHECK: br label %[[DIST_OUTER_LOOP_HEADER:.+]] + +// CHECK: [[DIST_OUTER_LOOP_HEADER]]: +// check EUB for distribute +// CHECK-DAG: [[OMP_UB_VAL_1:%.+]] = load{{.+}} [[OMP_UB]], +// CHECK: [[NUM_IT_1:%.+]] = load{{.+}}, +// CHECK-DAG: [[CMP_UB_NUM_IT:%.+]] = icmp sgt {{.+}} [[OMP_UB_VAL_1]], [[NUM_IT_1]] +// CHECK: br {{.+}} [[CMP_UB_NUM_IT]], label %[[EUB_TRUE:.+]], label %[[EUB_FALSE:.+]] +// CHECK-DAG: [[EUB_TRUE]]: +// CHECK: [[NUM_IT_2:%.+]] = load{{.+}}, +// CHECK: br label %[[EUB_END:.+]] +// CHECK-DAG: [[EUB_FALSE]]: +// CHECK: [[OMP_UB_VAL2:%.+]] = load{{.+}} [[OMP_UB]], +// CHECK: br label %[[EUB_END]] +// CHECK-DAG: [[EUB_END]]: +// CHECK-DAG: [[EUB_RES:%.+]] = phi{{.+}} [ [[NUM_IT_2]], %[[EUB_TRUE]] ], [ [[OMP_UB_VAL2]], %[[EUB_FALSE]] ] +// CHECK: store{{.+}} [[EUB_RES]], {{.+}}*
[[OMP_UB]], + +// initialize omp.iv +// CHECK: [[OMP_LB_VAL_1:%.+]] = load{{.+}}, {{.+}}* [[OMP_LB]], +// CHECK: store {{.+}} [[OMP_LB_VAL_1]], {{.+}}* [[OMP_IV]], + +// check exit condition +// CHECK-DAG: [[OMP_IV_VAL_1:%.+]] = load {{.+}} [[OMP_IV]], +// CHECK-DAG: [[OMP_UB_VAL_3:%.+]] = load {{.+}} [[OMP_UB]], +// CHECK: [[CMP_IV_UB:%.+]] = icmp sle {{.+}} [[OMP_IV_VAL_1]], [[OMP_UB_VAL_3]] +// CHECK: br {{.+}} [[CMP_IV_UB]], label %[[DIST_OUTER_LOOP_BODY:.+]], label %[[DIST_OUTER_LOOP_END:.+]] + +// CHECK: [[DIST_OUTER_LOOP_BODY]]: +// CHECK: br label %[[DIST_INNER_LOOP_HEADER:.+]] + +// CHECK: [[DIST_INNER_LOOP_HEADER]]: +// CHECK-DAG: [[OMP_IV_VAL_2:%.+]] = load {{.+}} [[OMP_IV]], +// CHECK-DAG: [[OMP_UB_VAL_4:%.+]] = load {{.+}} [[OMP_UB]], +// CHECK: [[CMP_IV_UB_2:%.+]] = icmp sle {{.+}} [[OMP_IV_VAL_2]], [[OMP_UB_VAL_4]] +// CHECK: br{{.+}} [[CMP_IV_UB_2]], label %[[DIST_INNER_LOOP_BODY:.+]], label %[[DIST_INNER_LOOP_END:.+]] + +// check that PrevLB and PrevUB are passed to the 'for' +// CHECK: [[DIST_INNER_LOOP_BODY]]: +// CHECK-DAG: [[OMP_PREV_LB:%.+]] = load {{.+}}, {{.+}} [[OMP_LB]], +// CHECK-64-DAG: [[OMP_PREV_LB_EXT:%.+]] = zext {{.+}} [[OMP_PREV_LB]] to {{.+}} +// CHECK-DAG: [[OMP_PREV_UB:%.+]] = load {{.+}}, {{.+}} [[OMP_UB]], +// CHECK-64-DAG: [[OMP_PREV_UB_EXT:%.+]] = zext {{.+}} [[OMP_PREV_UB]] to {{.+}} +// check that distlb and distub are properly passed to fork_call +// CHECK-64: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_3:@.+]] to {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_EXT]], i{{[0-9]+}} [[OMP_PREV_UB_EXT]], {{.+}}) +// CHECK-32: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_3:@.+]] to {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB]], i{{[0-9]+}} [[OMP_PREV_UB]], {{.+}}) +// CHECK: br label %[[DIST_INNER_LOOP_INC:.+]] + +// check DistInc +// CHECK: [[DIST_INNER_LOOP_INC]]: +// CHECK-DAG: [[OMP_IV_VAL_3:%.+]] = load {{.+}}, {{.+}}* [[OMP_IV]], +// CHECK-DAG: [[OMP_ST_VAL_1:%.+]] = load {{.+}}, {{.+}}* 
[[OMP_ST]], +// CHECK: [[OMP_IV_INC:%.+]] = add{{.+}} [[OMP_IV_VAL_3]], [[OMP_ST_VAL_1]] +// CHECK: store{{.+}} [[OMP_IV_INC]], {{.+}}* [[OMP_IV]], +// CHECK: br label %[[DIST_INNER_LOOP_HEADER]] + +// CHECK: [[DIST_INNER_LOOP_END]]: +// CHECK: br label %[[DIST_OUTER_LOOP_INC:.+]] + +// CHECK: [[DIST_OUTER_LOOP_INC]]: +// check NextLB and NextUB +// CHECK-DAG: [[OMP_LB_VAL_2:%.+]] = load{{.+}}, {{.+}} [[OMP_LB]], +// CHECK-DAG: [[OMP_ST_VAL_2:%.+]] = load{{.+}}, {{.+}} [[OMP_ST]], +// CHECK-DAG: [[OMP_LB_NEXT:%.+]] = add{{.+}} [[OMP_LB_VAL_2]], [[OMP_ST_VAL_2]] +// CHECK: store{{.+}} [[OMP_LB_NEXT]], {{.+}}* [[OMP_LB]], +// CHECK-DAG: [[OMP_UB_VAL_5:%.+]] = load{{.+}}, {{.+}} [[OMP_UB]], +// CHECK-DAG: [[OMP_ST_VAL_3:%.+]] = load{{.+}}, {{.+}} [[OMP_ST]], +// CHECK-DAG: [[OMP_UB_NEXT:%.+]] = add{{.+}} [[OMP_UB_VAL_5]], [[OMP_ST_VAL_3]] +// CHECK: store{{.+}} [[OMP_UB_NEXT]], {{.+}}* [[OMP_UB]], +// CHECK: br label %[[DIST_OUTER_LOOP_HEADER]] + +// outer loop exit +// CHECK: [[DIST_OUTER_LOOP_END]]: +// CHECK-DAG: call void @__kmpc_for_static_fini( +// CHECK: ret + +// skip implementation of 'parallel for': using default scheduling and was tested above + +// CHECK: define{{.+}} void [[OFFLOADING_FUN_4]]( +// CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 4, {{.+}}* [[OMP_OUTLINED_4:@.+]] to {{.+}}) + +// CHECK: define{{.+}} void [[OMP_OUTLINED_4]]( +// CHECK-DAG: [[OMP_IV:%.omp.iv]] = alloca +// CHECK-DAG: [[OMP_LB:%.omp.comb.lb]] = alloca +// CHECK-DAG: [[OMP_UB:%.omp.comb.ub]] = alloca +// CHECK-DAG: [[OMP_ST:%.omp.stride]] = alloca + +// CHECK: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, i32 92, +// CHECK: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_4:@.+]] to {{.+}}, +// skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case +// CHECK: ret + +// 'parallel for' implementation is the same as the case without schedule clause (static no chunk is the default) +// CHECK:
define{{.+}} void [[OMP_PARFOR_OUTLINED_4]]({{.+}}, {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_IN:%.+]], i{{[0-9]+}} [[OMP_PREV_UB_IN:%.+]], {{.+}}, {{.+}}, {{.+}}, {{.+}}) + +// CHECK-DAG: [[OMP_PF_LB:%.omp.lb]] = alloca{{.+}}, +// CHECK-DAG: [[OMP_PF_UB:%.omp.ub]] = alloca{{.+}}, +// CHECK-DAG: [[OMP_PF_IV:%.omp.iv]] = alloca{{.+}}, + +// initialize lb and ub to PrevLB and PrevUB +// CHECK-DAG: store{{.+}} [[OMP_PREV_LB_IN]], {{.+}}* [[PREV_LB_ADDR:%.+]], +// CHECK-DAG: store{{.+}} [[OMP_PREV_UB_IN]], {{.+}}* [[PREV_UB_ADDR:%.+]], +// CHECK-DAG: [[OMP_PREV_LB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_LB_ADDR]], +// CHECK-64-DAG: [[OMP_PREV_LB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_LB_VAL]] to {{.+}} +// CHECK-DAG: [[OMP_PREV_UB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], +// CHECK-64-DAG: [[OMP_PREV_UB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_UB_VAL]] to {{.+}} +// CHECK-64-DAG: store{{.+}} [[OMP_PREV_LB_TRC]], {{.+}}* [[OMP_PF_LB]], +// CHECK-32-DAG: store{{.+}} [[OMP_PREV_LB_VAL]], {{.+}}* [[OMP_PF_LB]], +// CHECK-64-DAG: store{{.+}} [[OMP_PREV_UB_TRC]], {{.+}}* [[OMP_PF_UB]], +// CHECK-32-DAG: store{{.+}} [[OMP_PREV_UB_VAL]], {{.+}}* [[OMP_PF_UB]], +// CHECK: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, {{.+}} 34, {{.+}}, {{.+}}* [[OMP_PF_LB]], {{.+}}* [[OMP_PF_UB]],{{.+}}) + +// PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used +// In this case we use EUB +// CHECK-DAG: [[OMP_PF_UB_VAL_1:%.+]] = load{{.+}} [[OMP_PF_UB]], +// CHECK: [[PF_NUM_IT_1:%.+]] = load{{.+}}, +// CHECK-DAG: [[PF_CMP_UB_NUM_IT:%.+]] = icmp{{.+}} [[OMP_PF_UB_VAL_1]], [[PF_NUM_IT_1]] +// CHECK: br i1 [[PF_CMP_UB_NUM_IT]], label %[[PF_EUB_TRUE:.+]], label %[[PF_EUB_FALSE:.+]] +// CHECK: [[PF_EUB_TRUE]]: +// CHECK: [[PF_NUM_IT_2:%.+]] = load{{.+}}, +// CHECK: br label %[[PF_EUB_END:.+]] +// CHECK-DAG: [[PF_EUB_FALSE]]: +// CHECK: [[OMP_PF_UB_VAL2:%.+]] = load{{.+}} [[OMP_PF_UB]], +// CHECK: br label %[[PF_EUB_END]] +// CHECK-DAG: [[PF_EUB_END]]: +// CHECK-DAG: 
[[PF_EUB_RES:%.+]] = phi{{.+}} [ [[PF_NUM_IT_2]], %[[PF_EUB_TRUE]] ], [ [[OMP_PF_UB_VAL2]], %[[PF_EUB_FALSE]] ] +// CHECK: store{{.+}} [[PF_EUB_RES]],{{.+}} [[OMP_PF_UB]], + +// initialize omp.iv +// CHECK: [[OMP_PF_LB_VAL_1:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], +// CHECK: store {{.+}} [[OMP_PF_LB_VAL_1]], {{.+}}* [[OMP_PF_IV]], +// CHECK: br label %[[OMP_PF_JUMP_BACK:.+]] + +// check exit condition +// CHECK: [[OMP_PF_JUMP_BACK]]: +// CHECK-DAG: [[OMP_PF_IV_VAL_1:%.+]] = load {{.+}} [[OMP_PF_IV]], +// CHECK-DAG: [[OMP_PF_UB_VAL_3:%.+]] = load {{.+}} [[OMP_PF_UB]], +// CHECK: [[PF_CMP_IV_UB:%.+]] = icmp sle {{.+}} [[OMP_PF_IV_VAL_1]], [[OMP_PF_UB_VAL_3]] +// CHECK: br {{.+}} [[PF_CMP_IV_UB]], label %[[PF_BODY:.+]], label %[[PF_END:.+]] + +// check that PrevLB and PrevUB are passed to the 'for' +// CHECK: [[PF_BODY]]: +// CHECK-DAG: {{.+}} = load{{.+}}, {{.+}}* [[OMP_PF_IV]], +// CHECK: br label {{.+}} + +// check stride 1 for 'for' in 'distribute parallel for' +// CHECK-DAG: [[OMP_PF_IV_VAL_2:%.+]] = load {{.+}}, {{.+}}* [[OMP_PF_IV]], +// CHECK: [[OMP_PF_IV_INC:%.+]] = add{{.+}} [[OMP_PF_IV_VAL_2]], 1 +// CHECK: store{{.+}} [[OMP_PF_IV_INC]], {{.+}}* [[OMP_PF_IV]], +// CHECK: br label %[[OMP_PF_JUMP_BACK]] + +// CHECK-DAG: call void @__kmpc_for_static_fini( +// CHECK: ret + +// CHECK: define{{.+}} void [[OFFLOADING_FUN_5]]( +// CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 5, {{.+}}* [[OMP_OUTLINED_5:@.+]] to {{.+}}) + +// CHECK: define{{.+}} void [[OMP_OUTLINED_5]]( +// CHECK: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, i32 92, +// CHECK: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_5:@.+]] to {{.+}}, +// skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case +// CHECK: ret + +// 'parallel for' implementation using outer and inner loops and PrevEUB +// CHECK: define{{.+}} void [[OMP_PARFOR_OUTLINED_5]]({{.+}}, {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_IN:%.+]], i{{[0-9]+}} 
[[OMP_PREV_UB_IN:%.+]], {{.+}}, {{.+}}, {{.+}}, {{.+}}, {{.+}}) +// CHECK-DAG: [[OMP_PF_LB:%.omp.lb]] = alloca{{.+}}, +// CHECK-DAG: [[OMP_PF_UB:%.omp.ub]] = alloca{{.+}}, +// CHECK-DAG: [[OMP_PF_IV:%.omp.iv]] = alloca{{.+}}, +// CHECK-DAG: [[OMP_PF_ST:%.omp.stride]] = alloca{{.+}}, + +// initialize lb and ub to PrevLB and PrevUB +// CHECK-DAG: store{{.+}} [[OMP_PREV_LB_IN]], {{.+}}* [[PREV_LB_ADDR:%.+]], +// CHECK-DAG: store{{.+}} [[OMP_PREV_UB_IN]], {{.+}}* [[PREV_UB_ADDR:%.+]], +// CHECK-DAG: [[OMP_PREV_LB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_LB_ADDR]], +// CHECK-64-DAG: [[OMP_PREV_LB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_LB_VAL]] to {{.+}} +// CHECK-DAG: [[OMP_PREV_UB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], +// CHECK-64-DAG: [[OMP_PREV_UB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_UB_VAL]] to {{.+}} +// CHECK-64-DAG: store{{.+}} [[OMP_PREV_LB_TRC]], {{.+}}* [[OMP_PF_LB]], +// CHECK-32-DAG: store{{.+}} [[OMP_PREV_LB_VAL]], {{.+}}* [[OMP_PF_LB]], +// CHECK-64-DAG: store{{.+}} [[OMP_PREV_UB_TRC]], {{.+}}* [[OMP_PF_UB]], +// CHECK-32-DAG: store{{.+}} [[OMP_PREV_UB_VAL]], {{.+}}* [[OMP_PF_UB]], +// CHECK: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, {{.+}} 33, {{.+}}, {{.+}}* [[OMP_PF_LB]], {{.+}}* [[OMP_PF_UB]],{{.+}}) +// CHECK: br label %[[OMP_PF_OUTER_LOOP_HEADER:.+]] + +// check PrevEUB (using PrevUB instead of NumIt as upper bound) +// CHECK: [[OMP_PF_OUTER_LOOP_HEADER]]: +// CHECK-DAG: [[OMP_PF_UB_VAL_1:%.+]] = load{{.+}} [[OMP_PF_UB]], +// CHECK-64-DAG: [[OMP_PF_UB_VAL_CONV:%.+]] = sext{{.+}} [[OMP_PF_UB_VAL_1]] to +// CHECK: [[PF_PREV_UB_VAL_1:%.+]] = load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], +// CHECK-64-DAG: [[PF_CMP_UB_NUM_IT:%.+]] = icmp{{.+}} [[OMP_PF_UB_VAL_CONV]], [[PF_PREV_UB_VAL_1]] +// CHECK-32-DAG: [[PF_CMP_UB_NUM_IT:%.+]] = icmp{{.+}} [[OMP_PF_UB_VAL_1]], [[PF_PREV_UB_VAL_1]] +// CHECK: br i1 [[PF_CMP_UB_NUM_IT]], label %[[PF_EUB_TRUE:.+]], label %[[PF_EUB_FALSE:.+]] +// CHECK: [[PF_EUB_TRUE]]: +// CHECK: [[PF_PREV_UB_VAL_2:%.+]] = 
load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], +// CHECK: br label %[[PF_EUB_END:.+]] +// CHECK-DAG: [[PF_EUB_FALSE]]: +// CHECK: [[OMP_PF_UB_VAL_2:%.+]] = load{{.+}} [[OMP_PF_UB]], +// CHECK-64: [[OMP_PF_UB_VAL_2_CONV:%.+]] = sext{{.+}} [[OMP_PF_UB_VAL_2]] to +// CHECK: br label %[[PF_EUB_END]] +// CHECK-DAG: [[PF_EUB_END]]: +// CHECK-64-DAG: [[PF_EUB_RES:%.+]] = phi{{.+}} [ [[PF_PREV_UB_VAL_2]], %[[PF_EUB_TRUE]] ], [ [[OMP_PF_UB_VAL_2_CONV]], %[[PF_EUB_FALSE]] ] +// CHECK-32-DAG: [[PF_EUB_RES:%.+]] = phi{{.+}} [ [[PF_PREV_UB_VAL_2]], %[[PF_EUB_TRUE]] ], [ [[OMP_PF_UB_VAL_2]], %[[PF_EUB_FALSE]] ] +// CHECK-64-DAG: [[PF_EUB_RES_CONV:%.+]] = trunc{{.+}} [[PF_EUB_RES]] to +// CHECK-64: store{{.+}} [[PF_EUB_RES_CONV]],{{.+}} [[OMP_PF_UB]], +// CHECK-32: store{{.+}} [[PF_EUB_RES]], {{.+}} [[OMP_PF_UB]], + +// initialize omp.iv (IV = LB) +// CHECK: [[OMP_PF_LB_VAL_1:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], +// CHECK: store {{.+}} [[OMP_PF_LB_VAL_1]], {{.+}}* [[OMP_PF_IV]], + +// outer loop: while (IV < UB) { +// CHECK-DAG: [[OMP_PF_IV_VAL_1:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_IV]], +// CHECK-DAG: [[OMP_PF_UB_VAL_3:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_UB]], +// CHECK: [[PF_CMP_IV_UB_1:%.+]] = icmp{{.+}} [[OMP_PF_IV_VAL_1]], [[OMP_PF_UB_VAL_3]] +// CHECK: br{{.+}} [[PF_CMP_IV_UB_1]], label %[[OMP_PF_OUTER_LOOP_BODY:.+]], label %[[OMP_PF_OUTER_LOOP_END:.+]] + +// CHECK: [[OMP_PF_OUTER_LOOP_BODY]]: +// CHECK: br label %[[OMP_PF_INNER_FOR_HEADER:.+]] + +// CHECK: [[OMP_PF_INNER_FOR_HEADER]]: +// CHECK-DAG: [[OMP_PF_IV_VAL_2:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_IV]], +// CHECK-DAG: [[OMP_PF_UB_VAL_4:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_UB]], +// CHECK: [[PF_CMP_IV_UB_2:%.+]] = icmp{{.+}} [[OMP_PF_IV_VAL_2]], [[OMP_PF_UB_VAL_4]] +// CHECK: br{{.+}} [[PF_CMP_IV_UB_2]], label %[[OMP_PF_INNER_LOOP_BODY:.+]], label %[[OMP_PF_INNER_LOOP_END:.+]] + +// CHECK: [[OMP_PF_INNER_LOOP_BODY]]: +// CHECK-DAG: {{.+}} = load{{.+}}, {{.+}}* [[OMP_PF_IV]], +// skip body branch +// CHECK: br{{.+}} +// 
CHECK: br label %[[OMP_PF_INNER_LOOP_INC:.+]] + +// IV = IV + 1 and inner loop latch +// CHECK: [[OMP_PF_INNER_LOOP_INC]]: +// CHECK-DAG: [[OMP_PF_IV_VAL_3:%.+]] = load{{.+}}, {{.+}}* [[OMP_IV]], +// CHECK-DAG: [[OMP_PF_NEXT_IV:%.+]] = add{{.+}} [[OMP_PF_IV_VAL_3]], 1 +// CHECK-DAG: store{{.+}} [[OMP_PF_NEXT_IV]], {{.+}}* [[OMP_IV]], +// CHECK: br label %[[OMP_PF_INNER_FOR_HEADER]] + +// check NextLB and NextUB +// CHECK: [[OMP_PF_INNER_LOOP_END]]: +// CHECK: br label %[[OMP_PF_OUTER_LOOP_INC:.+]] + +// CHECK: [[OMP_PF_OUTER_LOOP_INC]]: +// CHECK-DAG: [[OMP_PF_LB_VAL_2:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], +// CHECK-DAG: [[OMP_PF_ST_VAL_1:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_ST]], +// CHECK-DAG: [[OMP_PF_LB_NEXT:%.+]] = add{{.+}} [[OMP_PF_LB_VAL_2]], [[OMP_PF_ST_VAL_1]] +// CHECK: store{{.+}} [[OMP_PF_LB_NEXT]], {{.+}}* [[OMP_PF_LB]], +// CHECK-DAG: [[OMP_PF_UB_VAL_5:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_UB]], +// CHECK-DAG: [[OMP_PF_ST_VAL_2:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_ST]], +// CHECK-DAG: [[OMP_PF_UB_NEXT:%.+]] = add{{.+}} [[OMP_PF_UB_VAL_5]], [[OMP_PF_ST_VAL_2]] +// CHECK: store{{.+}} [[OMP_PF_UB_NEXT]], {{.+}}* [[OMP_PF_UB]], +// CHECK: br label %[[OMP_PF_OUTER_LOOP_HEADER]] + +// CHECK: [[OMP_PF_OUTER_LOOP_END]]: +// CHECK-DAG: call void @__kmpc_for_static_fini( +// CHECK: ret + +// CHECK: define{{.+}} void [[OFFLOADING_FUN_6]]( +// CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 4, {{.+}}* [[OMP_OUTLINED_6:@.+]] to {{.+}}) + +// CHECK: define{{.+}} void [[OMP_OUTLINED_6]]( +// CHECK: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, i32 92, +// CHECK: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_6:@.+]] to {{.+}}, +// skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case +// CHECK: ret + +// 'parallel for' implementation using outer and inner loops and PrevEUB +// CHECK: define{{.+}} void [[OMP_PARFOR_OUTLINED_6]]({{.+}}, {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_IN:%.+]], 
i{{[0-9]+}} [[OMP_PREV_UB_IN:%.+]], {{.+}}, {{.+}}, {{.+}}, {{.+}}) +// CHECK-DAG: [[OMP_PF_LB:%.omp.lb]] = alloca{{.+}}, +// CHECK-DAG: [[OMP_PF_UB:%.omp.ub]] = alloca{{.+}}, +// CHECK-DAG: [[OMP_PF_IV:%.omp.iv]] = alloca{{.+}}, +// CHECK-DAG: [[OMP_PF_ST:%.omp.stride]] = alloca{{.+}}, + +// initialize lb and ub to PrevLB and PrevUB +// CHECK-DAG: store{{.+}} [[OMP_PREV_LB_IN]], {{.+}}* [[PREV_LB_ADDR:%.+]], +// CHECK-DAG: store{{.+}} [[OMP_PREV_UB_IN]], {{.+}}* [[PREV_UB_ADDR:%.+]], +// CHECK-DAG: [[OMP_PREV_LB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_LB_ADDR]], +// CHECK-64-DAG: [[OMP_PREV_LB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_LB_VAL]] to {{.+}} +// CHECK-DAG: [[OMP_PREV_UB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], +// CHECK-64-DAG: [[OMP_PREV_UB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_UB_VAL]] to {{.+}} +// CHECK-64-DAG: store{{.+}} [[OMP_PREV_LB_TRC]], {{.+}}* [[OMP_PF_LB]], +// CHECK-64-DAG: store{{.+}} [[OMP_PREV_UB_TRC]], {{.+}}* [[OMP_PF_UB]], +// CHECK-32-DAG: store{{.+}} [[OMP_PREV_LB_VAL]], {{.+}}* [[OMP_PF_LB]], +// CHECK-32-DAG: store{{.+}} [[OMP_PREV_UB_VAL]], {{.+}}* [[OMP_PF_UB]], +// CHECK-DAG: [[OMP_PF_LB_VAL:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], +// CHECK-DAG: [[OMP_PF_UB_VAL:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_UB]], +// CHECK: call void @__kmpc_dispatch_init_4({{.+}}, {{.+}}, {{.+}} 35, {{.+}} [[OMP_PF_LB_VAL]], {{.+}} [[OMP_PF_UB_VAL]], {{.+}}, {{.+}}) +// CHECK: br label %[[OMP_PF_OUTER_LOOP_HEADER:.+]] + +// CHECK: [[OMP_PF_OUTER_LOOP_HEADER]]: +// CHECK: [[IS_FIN:%.+]] = call{{.+}} @__kmpc_dispatch_next_4({{.+}}, {{.+}}, {{.+}}, {{.+}}* [[OMP_PF_LB]], {{.+}}* [[OMP_PF_UB]], {{.+}}* [[OMP_PF_ST]]) +// CHECK: [[IS_FIN_CMP:%.+]] = icmp{{.+}} [[IS_FIN]], 0 +// CHECK: br{{.+}} [[IS_FIN_CMP]], label %[[OMP_PF_OUTER_LOOP_BODY:.+]], label %[[OMP_PF_OUTER_LOOP_END:.+]] + +// initialize omp.iv (IV = LB) +// CHECK: [[OMP_PF_OUTER_LOOP_BODY]]: +// CHECK-DAG: [[OMP_PF_LB_VAL_1:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], +// CHECK-DAG: store {{.+}} 
[[OMP_PF_LB_VAL_1]], {{.+}}* [[OMP_PF_IV]], +// CHECK: br label %[[OMP_PF_INNER_LOOP_HEADER:.+]] + +// CHECK: [[OMP_PF_INNER_LOOP_HEADER]]: +// CHECK-DAG: [[OMP_PF_IV_VAL_2:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_IV]], +// CHECK-DAG: [[OMP_PF_UB_VAL_4:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_UB]], +// CHECK: [[PF_CMP_IV_UB_2:%.+]] = icmp{{.+}} [[OMP_PF_IV_VAL_2]], [[OMP_PF_UB_VAL_4]] +// CHECK: br{{.+}} [[PF_CMP_IV_UB_2]], label %[[OMP_PF_INNER_LOOP_BODY:.+]], label %[[OMP_PF_INNER_LOOP_END:.+]] + +// CHECK: [[OMP_PF_INNER_LOOP_BODY]]: +// CHECK-DAG: {{.+}} = load{{.+}}, {{.+}}* [[OMP_PF_IV]], +// skip body branch +// CHECK: br{{.+}} +// CHECK: br label %[[OMP_PF_INNER_LOOP_INC:.+]] + +// IV = IV + 1 and inner loop latch +// CHECK: [[OMP_PF_INNER_LOOP_INC]]: +// CHECK-DAG: [[OMP_PF_IV_VAL_3:%.+]] = load{{.+}}, {{.+}}* [[OMP_IV]], +// CHECK-DAG: [[OMP_PF_NEXT_IV:%.+]] = add{{.+}} [[OMP_PF_IV_VAL_3]], 1 +// CHECK-DAG: store{{.+}} [[OMP_PF_NEXT_IV]], {{.+}}* [[OMP_IV]], +// CHECK: br label %[[OMP_PF_INNER_LOOP_HEADER]] + +// check NextLB and NextUB +// CHECK: [[OMP_PF_INNER_LOOP_END]]: +// CHECK: br label %[[OMP_PF_OUTER_LOOP_INC:.+]] + +// CHECK: [[OMP_PF_OUTER_LOOP_INC]]: +// CHECK: br label %[[OMP_PF_OUTER_LOOP_HEADER]] + +// CHECK: [[OMP_PF_OUTER_LOOP_END]]: +// CHECK: ret + +// CHECK: define{{.+}} void [[OFFLOADING_FUN_7]]( +// CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 5, {{.+}}* [[OMP_OUTLINED_7:@.+]] to {{.+}}) + +// CHECK: define{{.+}} void [[OMP_OUTLINED_7]]( +// CHECK: call void @__kmpc_for_static_init_4({{.+}}, {{.+}}, i32 92, +// CHECK: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_7:@.+]] to {{.+}}, +// skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case +// CHECK: ret + +// 'parallel for' implementation using outer and inner loops and PrevEUB +// CHECK: define{{.+}} void [[OMP_PARFOR_OUTLINED_7]]({{.+}}, {{.+}}, i{{[0-9]+}} [[OMP_PREV_LB_IN:%.+]], i{{[0-9]+}} 
[[OMP_PREV_UB_IN:%.+]], {{.+}}, {{.+}}, {{.+}}, {{.+}}, {{.+}}) +// CHECK-DAG: [[OMP_PF_LB:%.omp.lb]] = alloca{{.+}}, +// CHECK-DAG: [[OMP_PF_UB:%.omp.ub]] = alloca{{.+}}, +// CHECK-DAG: [[OMP_PF_IV:%.omp.iv]] = alloca{{.+}}, +// CHECK-DAG: [[OMP_PF_ST:%.omp.stride]] = alloca{{.+}}, + +// initialize lb and ub to PrevLB and PrevUB +// CHECK-DAG: store{{.+}} [[OMP_PREV_LB_IN]], {{.+}}* [[PREV_LB_ADDR:%.+]], +// CHECK-DAG: store{{.+}} [[OMP_PREV_UB_IN]], {{.+}}* [[PREV_UB_ADDR:%.+]], +// CHECK-DAG: [[OMP_PREV_LB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_LB_ADDR]], +// CHECK-64-DAG: [[OMP_PREV_LB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_LB_VAL]] to {{.+}} +// CHECK-DAG: [[OMP_PREV_UB_VAL:%.+]] = load{{.+}}, {{.+}}* [[PREV_UB_ADDR]], +// CHECK-64-DAG: [[OMP_PREV_UB_TRC:%.+]] = trunc{{.+}} [[OMP_PREV_UB_VAL]] to {{.+}} +// CHECK-64-DAG: store{{.+}} [[OMP_PREV_LB_TRC]], {{.+}}* [[OMP_PF_LB]], +// CHECK-64-DAG: store{{.+}} [[OMP_PREV_UB_TRC]], {{.+}}* [[OMP_PF_UB]], +// CHECK-32-DAG: store{{.+}} [[OMP_PREV_LB_VAL]], {{.+}}* [[OMP_PF_LB]], +// CHECK-32-DAG: store{{.+}} [[OMP_PREV_UB_VAL]], {{.+}}* [[OMP_PF_UB]], +// CHECK-DAG: [[OMP_PF_LB_VAL:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], +// CHECK-DAG: [[OMP_PF_UB_VAL:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_UB]], +// CHECK: call void @__kmpc_dispatch_init_4({{.+}}, {{.+}}, {{.+}} 35, {{.+}} [[OMP_PF_LB_VAL]], {{.+}} [[OMP_PF_UB_VAL]], {{.+}}, {{.+}}) +// CHECK: br label %[[OMP_PF_OUTER_LOOP_HEADER:.+]] + +// CHECK: [[OMP_PF_OUTER_LOOP_HEADER]]: +// CHECK: [[IS_FIN:%.+]] = call{{.+}} @__kmpc_dispatch_next_4({{.+}}, {{.+}}, {{.+}}, {{.+}}* [[OMP_PF_LB]], {{.+}}* [[OMP_PF_UB]], {{.+}}* [[OMP_PF_ST]]) +// CHECK: [[IS_FIN_CMP:%.+]] = icmp{{.+}} [[IS_FIN]], 0 +// CHECK: br{{.+}} [[IS_FIN_CMP]], label %[[OMP_PF_OUTER_LOOP_BODY:.+]], label %[[OMP_PF_OUTER_LOOP_END:.+]] + +// initialize omp.iv (IV = LB) +// CHECK: [[OMP_PF_OUTER_LOOP_BODY]]: +// CHECK-DAG: [[OMP_PF_LB_VAL_1:%.+]] = load{{.+}}, {{.+}} [[OMP_PF_LB]], +// CHECK-DAG: store {{.+}} 
[[OMP_PF_LB_VAL_1]], {{.+}}* [[OMP_PF_IV]], +// CHECK: br label %[[OMP_PF_INNER_LOOP_HEADER:.+]] + +// CHECK: [[OMP_PF_INNER_LOOP_HEADER]]: +// CHECK-DAG: [[OMP_PF_IV_VAL_2:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_IV]], +// CHECK-DAG: [[OMP_PF_UB_VAL_4:%.+]] = load{{.+}}, {{.+}}* [[OMP_PF_UB]], +// CHECK: [[PF_CMP_IV_UB_2:%.+]] = icmp{{.+}} [[OMP_PF_IV_VAL_2]], [[OMP_PF_UB_VAL_4]] +// CHECK: br{{.+}} [[PF_CMP_IV_UB_2]], label %[[OMP_PF_INNER_LOOP_BODY:.+]], label %[[OMP_PF_INNER_LOOP_END:.+]] + +// CHECK: [[OMP_PF_INNER_LOOP_BODY]]: +// CHECK-DAG: {{.+}} = load{{.+}}, {{.+}}* [[OMP_PF_IV]], +// skip body branch +// CHECK: br{{.+}} +// CHECK: br label %[[OMP_PF_INNER_LOOP_INC:.+]] + +// IV = IV + 1 and inner loop latch +// CHECK: [[OMP_PF_INNER_LOOP_INC]]: +// CHECK-DAG: [[OMP_PF_IV_VAL_3:%.+]] = load{{.+}}, {{.+}}* [[OMP_IV]], +// CHECK-DAG: [[OMP_PF_NEXT_IV:%.+]] = add{{.+}} [[OMP_PF_IV_VAL_3]], 1 +// CHECK-DAG: store{{.+}} [[OMP_PF_NEXT_IV]], {{.+}}* [[OMP_IV]], +// CHECK: br label %[[OMP_PF_INNER_LOOP_HEADER]] + +// check NextLB and NextUB +// CHECK: [[OMP_PF_INNER_LOOP_END]]: +// CHECK: br label %[[OMP_PF_OUTER_LOOP_INC:.+]] + +// CHECK: [[OMP_PF_OUTER_LOOP_INC]]: +// CHECK: br label %[[OMP_PF_OUTER_LOOP_HEADER]] + +// CHECK: [[OMP_PF_OUTER_LOOP_END]]: +// CHECK: ret +#endif diff --git a/test/OpenMP/distribute_parallel_for_firstprivate_codegen.cpp b/test/OpenMP/distribute_parallel_for_firstprivate_codegen.cpp new file mode 100644 index 0000000..d12c0aa --- /dev/null +++ b/test/OpenMP/distribute_parallel_for_firstprivate_codegen.cpp @@ -0,0 +1,619 @@ +// RxUN: %clang_cc1 -DLAMBDA -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-64 +// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s +// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ 
-triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-64 +// RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-32 +// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s +// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-32 + +// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64 +// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s +// RUN: %clang_cc1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64 +// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32 +// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s +// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32 +// expected-no-diagnostics +#ifndef HEADER +#define HEADER + +template 
<typename T> +struct S { + T f; + S(T a) : f(a) {} + S() : f() {} + operator T() { return T(); } + ~S() {} +}; + +// CHECK: [[S_FLOAT_TY:%.+]] = type { float } +// CHECK: [[S_INT_TY:%.+]] = type { i{{[0-9]+}} } +template <typename T> +T tmain() { + S<T> test; + T t_var = T(); + T vec[] = {1, 2}; + S<T> s_arr[] = {1, 2}; + S<T> &var = test; + #pragma omp target + #pragma omp teams + #pragma omp distribute parallel for firstprivate(t_var, vec, s_arr, s_arr, var, var) + for (int i = 0; i < 2; ++i) { + vec[i] = t_var; + s_arr[i] = var; + } + return T(); +} + +int main() { + static int svar; + volatile double g; + volatile double &g1 = g; + + #ifdef LAMBDA + // LAMBDA-LABEL: @main + // LAMBDA: call{{.*}} void [[OUTER_LAMBDA:@.+]]( + [&]() { + static float sfvar; + // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]]( + // LAMBDA: call i{{[0-9]+}} @__tgt_target_teams( + // LAMBDA: call void [[OFFLOADING_FUN:@.+]]( + + // LAMBDA: define{{.+}} void [[OFFLOADING_FUN]]( + // LAMBDA: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 4, {{.+}}* [[OMP_OUTLINED:@.+]] to {{.+}}) + #pragma omp target + #pragma omp teams + #pragma omp distribute parallel for firstprivate(g, g1, svar, sfvar) + for (int i = 0; i < 2; ++i) { + // LAMBDA-64: define{{.*}} internal{{.*}} void [[OMP_OUTLINED]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i{{[0-9]+}} [[G_IN:%.+]], i{{[0-9]+}} [[G1_IN:%.+]], i{{[0-9]+}} [[SVAR_IN:%.+]], i{{[0-9]+}} [[SFVAR_IN:%.+]]) + // LAMBDA-32: define{{.*}} internal{{.*}} void [[OMP_OUTLINED]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, double* {{.+}} [[G_IN:%.+]], i{{[0-9]+}} [[G1_IN:%.+]], i{{[0-9]+}} [[SVAR_IN:%.+]], i{{[0-9]+}} [[SFVAR_IN:%.+]]) + + // addr alloca's + // LAMBDA-64: [[G_ADDR:%.+]] = alloca i{{[0-9]+}}, + // LAMBDA-32: [[G_ADDR:%.+]] = alloca double*, + // LAMBDA: [[G1_ADDR:%.+]] = alloca i{{[0-9]+}}, + // LAMBDA: [[SVAR_ADDR:%.+]] = alloca i{{[0-9]+}}, + // LAMBDA: [[SFVAR_ADDR:%.+]] = alloca i{{[0-9]+}}, + // LAMBDA: [[G1_REF:%.+]] = alloca double*, + // LAMBDA: 
[[TMP:%.+]] = alloca double*, + + // private alloca's + // LAMBDA: [[G_PRIV:%.+]] = alloca double, + // LAMBDA: [[G1_PRIV:%.+]] = alloca double, + // LAMBDA: [[TMP_PRIV:%.+]] = alloca double*, + // LAMBDA: [[SVAR_PRIV:%.+]] = alloca i{{[0-9]+}}, + // LAMBDA: [[SFVAR_PRIV:%.+]] = alloca float, + + // transfer input parameters into addr alloca's + // LAMBDA-DAG: store {{.+}} [[G_IN]], {{.+}} [[G_ADDR]], + // LAMBDA-DAG: store {{.+}} [[G1_IN]], {{.+}} [[G1_ADDR]], + // LAMBDA-DAG: store {{.+}} [[SVAR_IN]], {{.+}} [[SVAR_ADDR]], + // LAMBDA-DAG: store {{.+}} [[SFVAR_IN]], {{.+}} [[SFVAR_ADDR]], + + // init private alloca's with addr alloca's + // g + // LAMBDA-64-DAG: [[G_CONV:%.+]] = bitcast {{.+}}* [[G_ADDR]] to + // LAMBDA-32-DAG: [[G_CONV:%.+]] = load {{.+}}*, {{.+}}** [[G_ADDR]] + // LAMBDA-DAG: [[G_ADDR_VAL:%.+]] = load {{.+}}, {{.+}}* [[G_CONV]], + // LAMBDA-DAG: store {{.+}} [[G_ADDR_VAL]], {{.+}}* [[G_PRIV]], + + // g1 + // LAMBDA-DAG: [[G1_CONV:%.+]] = bitcast {{.+}}* [[G1_ADDR]] to + // LAMBDA-DAG: store {{.+}}* [[G1_CONV]], {{.+}}** [[G1_REF]], + // LAMBDA-DAG: [[G1_REF_VAL:%.+]] = load {{.+}}*, {{.+}}** [[G1_REF]], + // LAMBDA-DAG: store {{.+}}* [[G1_REF_VAL]], {{.+}}** [[TMP]], + // LAMBDA-DAG: [[TMP_REF:%.+]] = load {{.+}}*, {{.+}}** [[TMP]], + // LAMBDA-DAG: [[TMP_VAL:%.+]] = load {{.+}}, {{.+}}* [[TMP_REF]], + // LAMBDA-DAG: store {{.+}} [[TMP_VAL]], {{.+}}* [[G1_PRIV]] + // LAMBDA-DAG: store {{.+}}* [[G1_PRIV]], {{.+}}** [[TMP_PRIV]], + + // svar + // LAMBDA-64-DAG: [[SVAR_CONV:%.+]] = bitcast {{.+}}* [[SVAR_ADDR]] to + // LAMBDA-64-DAG: [[SVAR_VAL:%.+]] = load {{.+}}, {{.+}}* [[SVAR_CONV]], + // LAMBDA-32-DAG: [[SVAR_VAL:%.+]] = load {{.+}}, {{.+}}* [[SVAR_ADDR]], + // LAMBDA-DAG: store {{.+}} [[SVAR_VAL]], {{.+}}* [[SVAR_PRIV]], + + // sfvar + // LAMBDA-DAG: [[SFVAR_CONV:%.+]] = bitcast {{.+}}* [[SFVAR_ADDR]] to + // LAMBDA-DAG: [[SFVAR_VAL:%.+]] = load {{.+}}, {{.+}}* [[SFVAR_CONV]], + // LAMBDA-DAG: store {{.+}} [[SFVAR_VAL]], {{.+}}* 
[[SFVAR_PRIV]], + + // LAMBDA: call {{.*}}void @__kmpc_for_static_init_4( + // pass firstprivate parameters to parallel outlined function + // g + // LAMBDA-64-DAG: [[G_PRIV_VAL:%.+]] = load {{.+}}, {{.+}}* [[G_PRIV]], + // LAMBDA-64: [[G_CAST_CONV:%.+]] = bitcast {{.+}}* [[G_CAST:%.+]] to + // LAMBDA-64-DAG: store {{.+}} [[G_PRIV_VAL]], {{.+}}* [[G_CAST_CONV]], + // LAMBDA-64-DAG: [[G_PAR:%.+]] = load {{.+}}, {{.+}}* [[G_CAST]], + + // g1 + // LAMBDA-DAG: [[TMP_PRIV_VAL:%.+]] = load {{.+}}, {{.+}}* [[TMP_PRIV]], + // LAMBDA-DAG: [[G1_PRIV_VAL:%.+]] = load {{.+}}, {{.+}}* [[TMP_PRIV_VAL]], + // LAMBDA: [[G1_CAST_CONV:%.+]] = bitcast {{.+}}* [[G1_CAST:%.+]] to + // LAMBDA-DAG: store {{.+}} [[G1_PRIV_VAL]], {{.+}}* [[G1_CAST_CONV]], + // LAMBDA-DAG: [[G1_PAR:%.+]] = load {{.+}}, {{.+}}* [[G1_CAST]], + + // svar + // LAMBDA: [[SVAR_VAL:%.+]] = load {{.+}}, {{.+}}* [[SVAR_PRIV]], + // LAMBDA-64-DAG: [[SVAR_CAST_CONV:%.+]] = bitcast {{.+}}* [[SVAR_CAST:%.+]] to + // LAMBDA-64-DAG: store {{.+}} [[SVAR_VAL]], {{.+}}* [[SVAR_CAST_CONV]], + // LAMBDA-32-DAG: store {{.+}} [[SVAR_VAL]], {{.+}}* [[SVAR_CAST:%.+]], + // LAMBDA-DAG: [[SVAR_PAR:%.+]] = load {{.+}}, {{.+}}* [[SVAR_CAST]], + + // sfvar + // LAMBDA: [[SFVAR_VAL:%.+]] = load {{.+}}, {{.+}}* [[SFVAR_PRIV]], + // LAMBDA-DAG: [[SFVAR_CAST_CONV:%.+]] = bitcast {{.+}}* [[SFVAR_CAST:%.+]] to + // LAMBDA-DAG: store {{.+}} [[SFVAR_VAL]], {{.+}}* [[SFVAR_CAST_CONV]], + // LAMBDA-DAG: [[SFVAR_PAR:%.+]] = load {{.+}}, {{.+}}* [[SFVAR_CAST]], + + // LAMBDA-64: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED:@.+]] to void ({{.+}})*), {{.+}}, {{.+}}, {{.+}} [[G_PAR]], {{.+}} [[G1_PAR]], {{.+}} [[SVAR_PAR]], {{.+}} [[SFVAR_PAR]]) + // LAMBDA-32: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED:@.+]] to void ({{.+}})*), {{.+}}, {{.+}}, {{.+}} [[G_PRIV]], {{.+}} [[G1_PAR]], {{.+}} [[SVAR_PAR]], {{.+}} [[SFVAR_PAR]]) + // LAMBDA: call {{.*}}void @__kmpc_for_static_fini( + // LAMBDA: ret 
void + + + // LAMBDA-64: define{{.+}} void [[OMP_PARFOR_OUTLINED]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, {{.+}}, {{.+}}, i{{[0-9]+}} [[G_IN:%.+]], i{{[0-9]+}} [[G1_IN:%.+]], i{{[0-9]+}} [[SVAR_IN:%.+]], i{{[0-9]+}} [[SFVAR_IN:%.+]]) + // LAMBDA-32: define{{.+}} void [[OMP_PARFOR_OUTLINED]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, {{.+}}, {{.+}}, double* {{.+}} [[G_IN:%.+]], i{{[0-9]+}} [[G1_IN:%.+]], i{{[0-9]+}} [[SVAR_IN:%.+]], i{{[0-9]+}} [[SFVAR_IN:%.+]]) + // skip initial params + // LAMBDA: {{.+}} = alloca{{.+}}, + // LAMBDA: {{.+}} = alloca{{.+}}, + // LAMBDA: {{.+}} = alloca{{.+}}, + // LAMBDA: {{.+}} = alloca{{.+}}, + + // addr alloca's + // LAMBDA-64: [[G_ADDR:%.+]] = alloca i{{[0-9]+}}, + // LAMBDA-32: [[G_ADDR:%.+]] = alloca double*, + // LAMBDA: [[G1_ADDR:%.+]] = alloca i{{[0-9]+}}, + // LAMBDA: [[SVAR_ADDR:%.+]] = alloca i{{[0-9]+}}, + // LAMBDA: [[SFVAR_ADDR:%.+]] = alloca i{{[0-9]+}}, + // LAMBDA: [[G1_REF:%.+]] = alloca double*, + + // private alloca's (only for 32-bit) + // LAMBDA-32: [[G_PRIV:%.+]] = alloca double, + + // transfer input parameters into addr alloca's + // LAMBDA-DAG: store {{.+}} [[G_IN]], {{.+}} [[G_ADDR]], + // LAMBDA-DAG: store {{.+}} [[G1_IN]], {{.+}} [[G1_ADDR]], + // LAMBDA-DAG: store {{.+}} [[SVAR_IN]], {{.+}} [[SVAR_ADDR]], + // LAMBDA-DAG: store {{.+}} [[SFVAR_IN]], {{.+}} [[SFVAR_ADDR]], + + // prepare parameters for lambda + // g + // LAMBDA-64-DAG: [[G_CONV:%.+]] = bitcast {{.+}}* [[G_ADDR]] to + // LAMBDA-32-DAG: [[G_ADDR_REF:%.+]] = load {{.+}}*, {{.+}}** [[G_ADDR]] + // LAMBDA-32-DAG: [[G_ADDR_VAL:%.+]] = load {{.+}}, {{.+}}* [[G_ADDR_REF]], + // LAMBDA-32-DAG: store {{.+}} [[G_ADDR_VAL]], {{.+}}* [[G_PRIV]], + + // g1 + // LAMBDA-DAG: [[G1_CONV:%.+]] = bitcast {{.+}}* [[G1_ADDR]] to + // LAMBDA-DAG: store {{.+}}* [[G1_CONV]], {{.+}}* [[G1_REF]], + + // svar + // LAMBDA-64-DAG: [[SVAR_CONV:%.+]] = bitcast {{.+}}* [[SVAR_ADDR]] to + + // sfvar + // LAMBDA-DAG: [[SFVAR_CONV:%.+]] = bitcast {{.+}}* 
[[SFVAR_ADDR]] to + + // LAMBDA: call {{.*}}void @__kmpc_for_static_init_4( + g = 1; + g1 = 1; + svar = 3; + sfvar = 4.0; + // LAMBDA-64: store double 1.0{{.+}}, double* [[G_CONV]], + // LAMBDA-32: store double 1.0{{.+}}, double* [[G_PRIV]], + // LAMBDA: [[G1_REF_REF:%.+]] = load {{.+}}*, {{.+}}** [[G1_REF]], + // LAMBDA: store {{.+}} 1.0{{.+}}, {{.+}}* [[G1_REF_REF]], + // LAMBDA-64: store {{.+}} 3, {{.+}}* [[SVAR_CONV]], + // LAMBDA-32: store {{.+}} 3, {{.+}}* [[SVAR_ADDR]], + // LAMBDA: store {{.+}} 4.0{{.+}}, {{.+}}* [[SFVAR_CONV]], + + // pass params to inner lambda + // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 + // LAMBDA-64: store double* [[G_CONV]], double** [[G_PRIVATE_ADDR_REF]], + // LAMBDA-32: store double* [[G_PRIV]], double** [[G_PRIVATE_ADDR_REF]], + // LAMBDA: [[G1_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 + // LAMBDA: [[G1_REF_REF:%.+]] = load double*, double** [[G1_REF]], + // LAMBDA: store double* [[G1_REF_REF]], double** [[G1_PRIVATE_ADDR_REF]], + // LAMBDA: [[SVAR_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 2 + // LAMBDA-64: store i{{[0-9]+}}* [[SVAR_CONV]], i{{[0-9]+}}** [[SVAR_PRIVATE_ADDR_REF]] + // LAMBDA-32: store i{{[0-9]+}}* [[SVAR_ADDR]], i{{[0-9]+}}** [[SVAR_PRIVATE_ADDR_REF]] + // LAMBDA: [[SFVAR_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 3 + // LAMBDA: store float* [[SFVAR_CONV]], float** [[SFVAR_PRIVATE_ADDR_REF]] + // LAMBDA: call{{.*}} void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]]) + // LAMBDA: call {{.*}}void @__kmpc_for_static_fini( + // LAMBDA: ret void + [&]() { + // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]]) + // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]], + g = 2; + g1 = 2; + svar = 4; + sfvar = 8.0; + // LAMBDA: 
[[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]] +// LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 +// LAMBDA: [[G_REF:%.+]] = load double*, double** [[G_PTR_REF]] +// LAMBDA: store double 2.0{{.+}}, double* [[G_REF]] + +// LAMBDA: [[TMP_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 +// LAMBDA: [[G1_REF:%.+]] = load double*, double** [[TMP_PTR_REF]] +// LAMBDA: store double 2.0{{.+}}, double* [[G1_REF]], +// LAMBDA: [[SVAR_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 2 +// LAMBDA: [[SVAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SVAR_PTR_REF]] +// LAMBDA: store i{{[0-9]+}} 4, i{{[0-9]+}}* [[SVAR_REF]] +// LAMBDA: [[SFVAR_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 3 +// LAMBDA: [[SFVAR_REF:%.+]] = load float*, float** [[SFVAR_PTR_REF]] +// LAMBDA: store float 8.0{{.+}}, float* [[SFVAR_REF]] + }(); + } + }(); + return 0; + #else + S<float> test; + int t_var = 0; + int vec[] = {1, 2}; + S<float> s_arr[] = {1, 2}; + S<float> &var = test; + + #pragma omp target + #pragma omp teams + #pragma omp distribute parallel for firstprivate(t_var, vec, s_arr, s_arr, var, var, svar) + for (int i = 0; i < 2; ++i) { + vec[i] = t_var; + s_arr[i] = var; + } + return tmain<int>(); + #endif +} + +// CHECK-LABEL: define{{.*}} i{{[0-9]+}} @main() +// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]], +// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]]) +// CHECK: call i{{[0-9]+}} @__tgt_target_teams( +// CHECK: call void [[OFFLOAD_FUN_0:@.+]]( +// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_DESTR:@.+]]([[S_FLOAT_TY]]* [[TEST]]) + +// CHECK: define{{.+}} [[OFFLOAD_FUN_0]](i{{[0-9]+}} [[T_VAR_IN:%.+]], [2 x i{{[0-9]+}}]* {{.+}} [[VEC_IN:%.+]], [2 x [[S_FLOAT_TY]]]* {{.+}} [[S_ARR_IN:%.+]], [[S_FLOAT_TY]]* {{.+}} [[VAR_IN:%.+]], i{{[0-9]+}} [[SVAR_IN:%.+]]) 
+// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_teams(%{{.+}}* @{{.+}}, i{{[0-9]+}} 5, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i{{[0-9]+}}, [2 x i{{[0-9]+}}]*, [2 x [[S_FLOAT_TY]]]*, [[S_FLOAT_TY]]*, i{{[0-9]+}})* [[OMP_OUTLINED_0:@.+]] to void +// CHECK: ret + +// CHECK: define internal void [[OMP_OUTLINED_0]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, i{{[0-9]+}} [[T_VAR_IN:%.+]], [2 x i{{[0-9]+}}]* {{.+}} [[VEC_IN:%.+]], [2 x [[S_FLOAT_TY]]]* {{.+}} [[S_ARR_IN:%.+]], [[S_FLOAT_TY]]* {{.+}} [[VAR_IN:%.+]], i{{[0-9]+}} [[SVAR_IN:%.+]]) + +// addr alloca's +// CHECK: [[T_VAR_ADDR:%.+]] = alloca i{{[0-9]+}}, +// CHECK: [[VEC_ADDR:%.+]] = alloca [2 x i{{[0-9]+}}]*, +// CHECK: [[S_ARR_ADDR:%.+]] = alloca [2 x [[S_FLOAT_TY]]]*, +// CHECK: [[VAR_ADDR:%.+]] = alloca [[S_FLOAT_TY]]*, +// CHECK: [[SVAR_ADDR:%.+]] = alloca i{{[0-9]+}}, +// CHECK: [[TMP:%.+]] = alloca [[S_FLOAT_TY]]*, + +// skip loop alloca's +// CHECK: [[OMP_IV:.omp.iv+]] = alloca i{{[0-9]+}}, +// CHECK: [[OMP_LB:.omp.comb.lb+]] = alloca i{{[0-9]+}}, +// CHECK: [[OMP_UB:.omp.comb.ub+]] = alloca i{{[0-9]+}}, +// CHECK: [[OMP_ST:.omp.stride+]] = alloca i{{[0-9]+}}, +// CHECK: [[OMP_IS_LAST:.omp.is_last+]] = alloca i{{[0-9]+}}, + +// private alloca's +// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}}, +// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}], +// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_FLOAT_TY]]], +// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_FLOAT_TY]], +// CHECK: [[TMP_PRIV:%.+]] = alloca [[S_FLOAT_TY]]*, +// CHECK: [[SVAR_PRIV:%.+]] = alloca i{{[0-9]+}}, + +// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]] + +// init addr alloca's with input values +// CHECK-DAG: store {{.+}} [[T_VAR_IN]], {{.+}}* [[T_VAR_ADDR]], +// CHECK-DAG: store {{.+}} [[VEC_IN]], {{.+}} [[VEC_ADDR]], +// CHECK-DAG: store {{.+}} [[S_ARR_IN]], {{.+}} [[S_ARR_ADDR]], +// 
CHECK-DAG: store {{.+}} [[VAR_IN]], {{.+}} [[VAR_ADDR]], +// CHECK-DAG: store {{.+}} [[SVAR_IN]], {{.+}} [[SVAR_ADDR]], + +// init private alloca's with addr alloca's +// t-var +// CHECK-64-DAG: [[T_VAR_CONV:%.+]] = bitcast {{.+}} [[T_VAR_ADDR]] to +// CHECK-64-DAG: [[T_VAR_ADDR_VAL:%.+]] = load {{.+}}, {{.+}}* [[T_VAR_CONV]], +// CHECK-32-DAG: [[T_VAR_ADDR_VAL:%.+]] = load {{.+}}, {{.+}}* [[T_VAR_ADDR]], +// CHECK-DAG: store {{.+}} [[T_VAR_ADDR_VAL]], {{.+}} [[T_VAR_PRIV]], + +// vec +// CHECK-DAG: [[VEC_ADDR_VAL:%.+]] = load {{.+}}*, {{.+}}** [[VEC_ADDR]], +// CHECK-DAG: [[VEC_PRIV_BCAST:%.+]] = bitcast {{.+}} [[VEC_PRIV]] to +// CHECK-DAG: [[VEC_ADDR_BCAST:%.+]] = bitcast {{.+}} [[VEC_ADDR_VAL]] to +// CHECK-DAG: call void @llvm.memcpy{{.+}}({{.+}}* [[VEC_PRIV_BCAST]], {{.+}}* [[VEC_ADDR_BCAST]], + +// s_arr +// CHECK-DAG: [[S_ARR_ADDR_VAL:%.+]] = load {{.+}}*, {{.+}}** [[S_ARR_ADDR]], +// CHECK-DAG: [[S_ARR_BGN:%.+]] = getelementptr {{.+}}, {{.+}}* [[S_ARR_PRIV]], +// CHECK-DAG: [[S_ARR_ADDR_BCAST:%.+]] = bitcast {{.+}}* [[S_ARR_ADDR_VAL]] to +// CHECK-DAG: [[S_ARR_BGN_GEP:%.+]] = getelementptr {{.+}}, {{.+}}* [[S_ARR_BGN]], +// CHECK-DAG: [[S_ARR_EMPTY:%.+]] = icmp {{.+}} [[S_ARR_BGN]], [[S_ARR_BGN_GEP]] +// CHECK-DAG: br {{.+}} [[S_ARR_EMPTY]], label %[[CPY_DONE:.+]], label %[[CPY_BODY:.+]] +// CHECK-DAG: [[CPY_BODY]]: +// CHECK-DAG: call void @llvm.memcpy{{.+}}( +// CHECK-DAG: [[CPY_DONE]]: + +// var +// CHECK-DAG: [[TMP_REF:%.+]] = load {{.+}}*, {{.+}}* [[TMP]], +// CHECK-DAG: [[VAR_PRIV_BCAST:%.+]] = bitcast {{.+}}* [[VAR_PRIV]] to +// CHECK-DAG: [[TMP_REF_BCAST:%.+]] = bitcast {{.+}}* [[TMP_REF]] to +// CHECK-DAG: call void @llvm.memcpy.{{.+}}({{.+}}* [[VAR_PRIV_BCAST]], {{.+}}* [[TMP_REF_BCAST]], +// CHECK-DAG: store {{.+}}* [[VAR_PRIV]], {{.+}}** [[TMP_PRIV]], + +// svar +// CHECK-64-DAG: [[SVAR_CONV:%.+]] = bitcast {{.+}}* [[SVAR_ADDR]] to +// CHECK-64-DAG: [[SVAR_CONV_VAL:%.+]] = load {{.+}}, {{.+}}* [[SVAR_CONV]], +// CHECK-32-DAG: 
[[SVAR_CONV_VAL:%.+]] = load {{.+}}, {{.+}}* [[SVAR_ADDR]], +// CHECK-DAG: store {{.+}} [[SVAR_CONV_VAL]], {{.+}}* [[SVAR_PRIV]], + +// CHECK: call void @__kmpc_for_static_init_4( +// pass private alloca's to fork +// CHECK-DAG: [[T_VAR_PRIV_VAL:%.+]] = load {{.+}}, {{.+}}* [[T_VAR_PRIV]], +// not dag to distinguish with S_VAR_CAST +// CHECK-64: [[T_VAR_CAST_CONV:%.+]] = bitcast {{.+}}* [[T_VAR_CAST:%.+]] to +// CHECK-64-DAG: store {{.+}} [[T_VAR_PRIV_VAL]], {{.+}} [[T_VAR_CAST_CONV]], +// CHECK-32: store {{.+}} [[T_VAR_PRIV_VAL]], {{.+}} [[T_VAR_CAST:%.+]], +// CHECK-DAG: [[T_VAR_CAST_VAL:%.+]] = load {{.+}}, {{.+}}* [[T_VAR_CAST]], +// CHECK-DAG: [[TMP_PRIV_VAL:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** [[TMP_PRIV]], +// CHECK-DAG: [[SVAR_PRIV_VAL:%.+]] = load {{.+}}, {{.+}}* [[SVAR_PRIV]], +// CHECK-64-DAG: [[SVAR_CAST_CONV:%.+]] = bitcast {{.+}}* [[SVAR_CAST:%.+]] to +// CHECK-64-DAG: store {{.+}} [[SVAR_PRIV_VAL]], {{.+}}* [[SVAR_CAST_CONV]], +// CHECK-32-DAG: store {{.+}} [[SVAR_PRIV_VAL]], {{.+}}* [[SVAR_CAST:%.+]], +// CHECK-DAG: [[SVAR_CAST_VAL:%.+]] = load {{.+}}, {{.+}}* [[SVAR_CAST]], +// CHECK: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_0:@.+]] to void ({{.+}})*), {{.+}}, {{.+}}, [2 x i{{[0-9]+}}]* [[VEC_PRIV]], i{{[0-9]+}} [[T_VAR_CAST_VAL]], [2 x [[S_FLOAT_TY]]]* [[S_ARR_PRIV]], [[S_FLOAT_TY]]* [[TMP_PRIV_VAL]], i{{[0-9]+}} [[SVAR_CAST_VAL]]) +// CHECK: call void @__kmpc_for_static_fini( + +// call destructors: var.. +// CHECK-DAG: call {{.+}} [[S_FLOAT_TY_DEF_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]]) + +// ..and s_arr +// CHECK: {{.+}}: +// CHECK: [[S_ARR_EL_PAST:%.+]] = phi [[S_FLOAT_TY]]* +// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = getelementptr {{.+}}, {{.+}} [[S_ARR_EL_PAST]], +// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_DESTR]]([[S_FLOAT_TY]]* [[S_ARR_PRIV_ITEM]]) + +// CHECK: ret void + +// By OpenMP specifications, 'firstprivate' applies to both distribute and parallel for. 
+// However, the support for 'firstprivate' of 'parallel' is only used when 'parallel' +// is found alone. Therefore we only have one 'firstprivate' support for 'parallel for' +// in combination +// CHECK: define internal void [[OMP_PARFOR_OUTLINED_0]]({{.+}}, {{.+}}, {{.+}}, {{.+}}, [2 x i{{[0-9]+}}]* {{.+}} [[VEC_IN:%.+]], i{{[0-9]+}} [[T_VAR_IN:%.+]], [2 x [[S_FLOAT_TY]]]* {{.+}} [[S_ARR_IN:%.+]], [[S_FLOAT_TY]]* {{.+}} [[VAR_IN:%.+]], i{{[0-9]+}} [[SVAR_IN:%.+]]) + +// addr alloca's +// CHECK: [[VEC_ADDR:%.+]] = alloca [2 x i{{[0-9]+}}]*, +// CHECK: [[T_VAR_ADDR:%.+]] = alloca i{{[0-9]+}}, +// CHECK: [[S_ARR_ADDR:%.+]] = alloca [2 x [[S_FLOAT_TY]]]*, +// CHECK: [[VAR_ADDR:%.+]] = alloca [[S_FLOAT_TY]]*, +// CHECK: [[SVAR_ADDR:%.+]] = alloca i{{[0-9]+}}, + +// skip loop alloca's +// CHECK: [[OMP_IV:.omp.iv+]] = alloca i{{[0-9]+}}, +// CHECK: [[OMP_LB:.omp.lb+]] = alloca i{{[0-9]+}}, +// CHECK: [[OMP_UB:.omp.ub+]] = alloca i{{[0-9]+}}, +// CHECK: [[OMP_ST:.omp.stride+]] = alloca i{{[0-9]+}}, +// CHECK: [[OMP_IS_LAST:.omp.is_last+]] = alloca i{{[0-9]+}}, + +// private alloca's +// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}], +// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_FLOAT_TY]]], +// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_FLOAT_TY]], +// CHECK: [[TMP_PRIV:%.+]] = alloca [[S_FLOAT_TY]]*, + +// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]] + +// init addr alloca's with input values +// CHECK-DAG: store {{.+}} [[VEC_IN]], {{.+}} [[VEC_ADDR]], +// CHECK-DAG: store {{.+}} [[T_VAR_IN]], {{.+}}* [[T_VAR_ADDR]], +// CHECK-DAG: store {{.+}} [[S_ARR_IN]], {{.+}} [[S_ARR_ADDR]], +// CHECK-DAG: store {{.+}} [[VAR_IN]], {{.+}} [[VAR_ADDR]], +// CHECK-DAG: store {{.+}} [[SVAR_IN]], {{.+}} [[SVAR_ADDR]], + +// init private alloca's with addr alloca's +// vec +// CHECK-DAG: [[VEC_ADDR_VAL:%.+]] = load {{.+}}*, {{.+}}** [[VEC_ADDR]], +// CHECK-DAG: [[VEC_PRIV_BCAST:%.+]] = bitcast {{.+}} [[VEC_PRIV]] to +// CHECK-DAG: [[VEC_ADDR_BCAST:%.+]] 
= bitcast {{.+}} [[VEC_ADDR_VAL]] to +// CHECK-DAG: call void @llvm.memcpy{{.+}}({{.+}}* [[VEC_PRIV_BCAST]], {{.+}}* [[VEC_ADDR_BCAST]], + +// s_arr +// CHECK-DAG: [[S_ARR_ADDR_VAL:%.+]] = load {{.+}}*, {{.+}}** [[S_ARR_ADDR]], +// CHECK-DAG: [[S_ARR_BGN:%.+]] = getelementptr {{.+}}, {{.+}}* [[S_ARR_PRIV]], +// CHECK-DAG: [[S_ARR_ADDR_BCAST:%.+]] = bitcast {{.+}}* [[S_ARR_ADDR_VAL]] to +// CHECK-DAG: [[S_ARR_BGN_GEP:%.+]] = getelementptr {{.+}}, {{.+}}* [[S_ARR_BGN]], +// CHECK-DAG: [[S_ARR_EMPTY:%.+]] = icmp {{.+}} [[S_ARR_BGN]], [[S_ARR_BGN_GEP]] +// CHECK-DAG: br {{.+}} [[S_ARR_EMPTY]], label %[[CPY_DONE:.+]], label %[[CPY_BODY:.+]] +// CHECK-DAG: [[CPY_BODY]]: +// CHECK-DAG: call void @llvm.memcpy{{.+}}( +// CHECK-DAG: [[CPY_DONE]]: + +// var +// CHECK-DAG: [[VAR_ADDR_REF:%.+]] = load {{.+}}*, {{.+}}* [[VAR_ADDR]], +// CHECK-DAG: [[VAR_PRIV_BCAST:%.+]] = bitcast {{.+}}* [[VAR_PRIV]] to +// CHECK-DAG: [[VAR_ADDR_BCAST:%.+]] = bitcast {{.+}}* [[VAR_ADDR_REF]] to +// CHECK-DAG: call void @llvm.memcpy.{{.+}}({{.+}}* [[VAR_PRIV_BCAST]], {{.+}}* [[VAR_ADDR_BCAST]], +// CHECK-DAG: store {{.+}}* [[VAR_PRIV]], {{.+}}** [[TMP_PRIV]], + +// CHECK: call void @__kmpc_for_static_init_4( +// CHECK: call void @__kmpc_for_static_fini( + +// call destructors: var.. 
+// CHECK-DAG: call {{.+}} [[S_FLOAT_TY_DEF_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]]) + +// ..and s_arr +// CHECK: {{.+}}: +// CHECK: [[S_ARR_EL_PAST:%.+]] = phi [[S_FLOAT_TY]]* +// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = getelementptr {{.+}}, {{.+}} [[S_ARR_EL_PAST]], +// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_DESTR]]([[S_FLOAT_TY]]* [[S_ARR_PRIV_ITEM]]) + +// CHECK: ret void + +// template tmain with S_INT_TY +// CHECK-LABEL: define{{.*}} i{{[0-9]+}} @{{.+}}tmain{{.+}}() +// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]], +// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]]) +// CHECK: call i{{[0-9]+}} @__tgt_target_teams( +// CHECK: call void [[OFFLOAD_FUN_0:@.+]]( +// CHECK: call {{.*}} [[S_INT_TY_DEF_DESTR:@.+]]([[S_INT_TY]]* [[TEST]]) + +// CHECK: define{{.+}} [[OFFLOAD_FUN_0]](i{{[0-9]+}} [[T_VAR_IN:%.+]], [2 x i{{[0-9]+}}]* {{.+}} [[VEC_IN:%.+]], [2 x [[S_INT_TY]]]* {{.+}} [[S_ARR_IN:%.+]], [[S_INT_TY]]* {{.+}} [[VAR_IN:%.+]]) +// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) 
@__kmpc_fork_teams(%{{.+}}* @{{.+}}, i{{[0-9]+}} 4, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i{{[0-9]+}}, [2 x i{{[0-9]+}}]*, [2 x [[S_INT_TY]]]*, [[S_INT_TY]]*)* [[OMP_OUTLINED_0:@.+]] to void +// CHECK: ret + +// CHECK: define internal void [[OMP_OUTLINED_0]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, i{{[0-9]+}} [[T_VAR_IN:%.+]], [2 x i{{[0-9]+}}]* {{.+}} [[VEC_IN:%.+]], [2 x [[S_INT_TY]]]* {{.+}} [[S_ARR_IN:%.+]], [[S_INT_TY]]* {{.+}} [[VAR_IN:%.+]]) + +// addr alloca's +// CHECK: [[T_VAR_ADDR:%.+]] = alloca i{{[0-9]+}}, +// CHECK: [[VEC_ADDR:%.+]] = alloca [2 x i{{[0-9]+}}]*, +// CHECK: [[S_ARR_ADDR:%.+]] = alloca [2 x [[S_INT_TY]]]*, +// CHECK: [[VAR_ADDR:%.+]] = alloca [[S_INT_TY]]*, +// CHECK: [[TMP:%.+]] = alloca [[S_INT_TY]]*, + +// skip loop alloca's +// CHECK: [[OMP_IV:.omp.iv+]] = alloca i{{[0-9]+}}, +// CHECK: [[OMP_LB:.omp.comb.lb+]] = alloca i{{[0-9]+}}, +// CHECK: [[OMP_UB:.omp.comb.ub+]] = alloca i{{[0-9]+}}, +// CHECK: [[OMP_ST:.omp.stride+]] = alloca i{{[0-9]+}}, +// CHECK: [[OMP_IS_LAST:.omp.is_last+]] = alloca i{{[0-9]+}}, + +// private alloca's +// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}}, +// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}], +// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_INT_TY]]], +// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]], +// CHECK: [[TMP_PRIV:%.+]] = alloca [[S_INT_TY]]*, + +// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]] + +// init addr alloca's with input values +// CHECK-DAG: store {{.+}} [[T_VAR_IN]], {{.+}}* [[T_VAR_ADDR]], +// CHECK-DAG: store {{.+}} [[VEC_IN]], {{.+}} [[VEC_ADDR]], +// CHECK-DAG: store {{.+}} [[S_ARR_IN]], {{.+}} [[S_ARR_ADDR]], +// CHECK-DAG: store {{.+}} [[VAR_IN]], {{.+}} [[VAR_ADDR]], + +// init private alloca's with addr alloca's +// t-var +// CHECK-64-DAG: [[T_VAR_CONV:%.+]] = bitcast {{.+}} [[T_VAR_ADDR]] to +// CHECK-64-DAG: [[T_VAR_ADDR_VAL:%.+]] = load {{.+}}, {{.+}}* 
[[T_VAR_CONV]], +// CHECK-32-DAG: [[T_VAR_ADDR_VAL:%.+]] = load {{.+}}, {{.+}}* [[T_VAR_ADDR]], +// CHECK-DAG: store {{.+}} [[T_VAR_ADDR_VAL]], {{.+}} [[T_VAR_PRIV]], + +// vec +// CHECK-DAG: [[VEC_ADDR_VAL:%.+]] = load {{.+}}*, {{.+}}** [[VEC_ADDR]], +// CHECK-DAG: [[VEC_PRIV_BCAST:%.+]] = bitcast {{.+}} [[VEC_PRIV]] to +// CHECK-DAG: [[VEC_ADDR_BCAST:%.+]] = bitcast {{.+}} [[VEC_ADDR_VAL]] to +// CHECK-DAG: call void @llvm.memcpy{{.+}}({{.+}}* [[VEC_PRIV_BCAST]], {{.+}}* [[VEC_ADDR_BCAST]], + +// s_arr +// CHECK-DAG: [[S_ARR_ADDR_VAL:%.+]] = load {{.+}}*, {{.+}}** [[S_ARR_ADDR]], +// CHECK-DAG: [[S_ARR_BGN:%.+]] = getelementptr {{.+}}, {{.+}}* [[S_ARR_PRIV]], +// CHECK-DAG: [[S_ARR_ADDR_BCAST:%.+]] = bitcast {{.+}}* [[S_ARR_ADDR_VAL]] to +// CHECK-DAG: [[S_ARR_BGN_GEP:%.+]] = getelementptr {{.+}}, {{.+}}* [[S_ARR_BGN]], +// CHECK-DAG: [[S_ARR_EMPTY:%.+]] = icmp {{.+}} [[S_ARR_BGN]], [[S_ARR_BGN_GEP]] +// CHECK-DAG: br {{.+}} [[S_ARR_EMPTY]], label %[[CPY_DONE:.+]], label %[[CPY_BODY:.+]] +// CHECK-DAG: [[CPY_BODY]]: +// CHECK-DAG: call void @llvm.memcpy{{.+}}( +// CHECK-DAG: [[CPY_DONE]]: + +// var +// CHECK-DAG: [[TMP_REF:%.+]] = load {{.+}}*, {{.+}}* [[TMP]], +// CHECK-DAG: [[VAR_PRIV_BCAST:%.+]] = bitcast {{.+}}* [[VAR_PRIV]] to +// CHECK-DAG: [[TMP_REF_BCAST:%.+]] = bitcast {{.+}}* [[TMP_REF]] to +// CHECK-DAG: call void @llvm.memcpy.{{.+}}({{.+}}* [[VAR_PRIV_BCAST]], {{.+}}* [[TMP_REF_BCAST]], +// CHECK-DAG: store {{.+}}* [[VAR_PRIV]], {{.+}}** [[TMP_PRIV]], + +// CHECK: call void @__kmpc_for_static_init_4( +// pass private alloca's to fork +// CHECK-DAG: [[T_VAR_PRIV_VAL:%.+]] = load {{.+}}, {{.+}}* [[T_VAR_PRIV]], +// not dag to distinguish with S_VAR_CAST +// CHECK-64: [[T_VAR_CAST_CONV:%.+]] = bitcast {{.+}}* [[T_VAR_CAST:%.+]] to +// CHECK-64-DAG: store {{.+}} [[T_VAR_PRIV_VAL]], {{.+}} [[T_VAR_CAST_CONV]], +// CHECK-32: store {{.+}} [[T_VAR_PRIV_VAL]], {{.+}} [[T_VAR_CAST:%.+]], +// CHECK-DAG: [[T_VAR_CAST_VAL:%.+]] = load {{.+}}, {{.+}}* 
[[T_VAR_CAST]], +// CHECK-DAG: [[TMP_PRIV_VAL:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[TMP_PRIV]], +// CHECK: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_0:@.+]] to void ({{.+}})*), {{.+}}, {{.+}}, [2 x i{{[0-9]+}}]* [[VEC_PRIV]], i{{[0-9]+}} [[T_VAR_CAST_VAL]], [2 x [[S_INT_TY]]]* [[S_ARR_PRIV]], [[S_INT_TY]]* [[TMP_PRIV_VAL]]) +// CHECK: call void @__kmpc_for_static_fini( + +// call destructors: var.. +// CHECK-DAG: call {{.+}} [[S_INT_TY_DEF_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]]) + +// ..and s_arr +// CHECK: {{.+}}: +// CHECK: [[S_ARR_EL_PAST:%.+]] = phi [[S_INT_TY]]* +// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = getelementptr {{.+}}, {{.+}} [[S_ARR_EL_PAST]], +// CHECK: call {{.*}} [[S_INT_TY_DEF_DESTR]]([[S_INT_TY]]* [[S_ARR_PRIV_ITEM]]) + +// CHECK: ret void + +// By OpenMP specifications, 'firstprivate' applies to both distribute and parallel for. +// However, the support for 'firstprivate' of 'parallel' is only used when 'parallel' +// is found alone. Therefore we only have one 'firstprivate' support for 'parallel for' +// in combination +// CHECK: define internal void [[OMP_PARFOR_OUTLINED_0]]({{.+}}, {{.+}}, {{.+}}, {{.+}}, [2 x i{{[0-9]+}}]* {{.+}} [[VEC_IN:%.+]], i{{[0-9]+}} [[T_VAR_IN:%.+]], [2 x [[S_INT_TY]]]* {{.+}} [[S_ARR_IN:%.+]], [[S_INT_TY]]* {{.+}} [[VAR_IN:%.+]]) + +// addr alloca's +// CHECK: [[VEC_ADDR:%.+]] = alloca [2 x i{{[0-9]+}}]*, +// CHECK: [[T_VAR_ADDR:%.+]] = alloca i{{[0-9]+}}, +// CHECK: [[S_ARR_ADDR:%.+]] = alloca [2 x [[S_INT_TY]]]*, +// CHECK: [[VAR_ADDR:%.+]] = alloca [[S_INT_TY]]*, + +// skip loop alloca's +// CHECK: [[OMP_IV:.omp.iv+]] = alloca i{{[0-9]+}}, +// CHECK: [[OMP_LB:.omp.lb+]] = alloca i{{[0-9]+}}, +// CHECK: [[OMP_UB:.omp.ub+]] = alloca i{{[0-9]+}}, +// CHECK: [[OMP_ST:.omp.stride+]] = alloca i{{[0-9]+}}, +// CHECK: [[OMP_IS_LAST:.omp.is_last+]] = alloca i{{[0-9]+}}, + +// private alloca's +// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}], +// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x 
[[S_INT_TY]]], +// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]], +// CHECK: [[TMP_PRIV:%.+]] = alloca [[S_INT_TY]]*, + +// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]] + +// init addr alloca's with input values +// CHECK-DAG: store {{.+}} [[VEC_IN]], {{.+}} [[VEC_ADDR]], +// CHECK-DAG: store {{.+}} [[T_VAR_IN]], {{.+}}* [[T_VAR_ADDR]], +// CHECK-DAG: store {{.+}} [[S_ARR_IN]], {{.+}} [[S_ARR_ADDR]], +// CHECK-DAG: store {{.+}} [[VAR_IN]], {{.+}} [[VAR_ADDR]], + +// init private alloca's with addr alloca's +// vec +// CHECK-DAG: [[VEC_ADDR_VAL:%.+]] = load {{.+}}*, {{.+}}** [[VEC_ADDR]], +// CHECK-DAG: [[VEC_PRIV_BCAST:%.+]] = bitcast {{.+}} [[VEC_PRIV]] to +// CHECK-DAG: [[VEC_ADDR_BCAST:%.+]] = bitcast {{.+}} [[VEC_ADDR_VAL]] to +// CHECK-DAG: call void @llvm.memcpy{{.+}}({{.+}}* [[VEC_PRIV_BCAST]], {{.+}}* [[VEC_ADDR_BCAST]], + +// s_arr +// CHECK-DAG: [[S_ARR_ADDR_VAL:%.+]] = load {{.+}}*, {{.+}}** [[S_ARR_ADDR]], +// CHECK-DAG: [[S_ARR_BGN:%.+]] = getelementptr {{.+}}, {{.+}}* [[S_ARR_PRIV]], +// CHECK-DAG: [[S_ARR_ADDR_BCAST:%.+]] = bitcast {{.+}}* [[S_ARR_ADDR_VAL]] to +// CHECK-DAG: [[S_ARR_BGN_GEP:%.+]] = getelementptr {{.+}}, {{.+}}* [[S_ARR_BGN]], +// CHECK-DAG: [[S_ARR_EMPTY:%.+]] = icmp {{.+}} [[S_ARR_BGN]], [[S_ARR_BGN_GEP]] +// CHECK-DAG: br {{.+}} [[S_ARR_EMPTY]], label %[[CPY_DONE:.+]], label %[[CPY_BODY:.+]] +// CHECK-DAG: [[CPY_BODY]]: +// CHECK-DAG: call void @llvm.memcpy{{.+}}( +// CHECK-DAG: [[CPY_DONE]]: + +// var +// CHECK-DAG: [[VAR_ADDR_REF:%.+]] = load {{.+}}*, {{.+}}* [[VAR_ADDR]], +// CHECK-DAG: [[VAR_PRIV_BCAST:%.+]] = bitcast {{.+}}* [[VAR_PRIV]] to +// CHECK-DAG: [[VAR_ADDR_BCAST:%.+]] = bitcast {{.+}}* [[VAR_ADDR_REF]] to +// CHECK-DAG: call void @llvm.memcpy.{{.+}}({{.+}}* [[VAR_PRIV_BCAST]], {{.+}}* [[VAR_ADDR_BCAST]], +// CHECK-DAG: store {{.+}}* [[VAR_PRIV]], {{.+}}** [[TMP_PRIV]], + +// CHECK: call void @__kmpc_for_static_init_4( +// CHECK: call void @__kmpc_for_static_fini( + +// call 
destructors: var.. +// CHECK-DAG: call {{.+}} [[S_INT_TY_DEF_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]]) + +// ..and s_arr +// CHECK: {{.+}}: +// CHECK: [[S_ARR_EL_PAST:%.+]] = phi [[S_INT_TY]]* +// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = getelementptr {{.+}}, {{.+}} [[S_ARR_EL_PAST]], +// CHECK: call {{.*}} [[S_INT_TY_DEF_DESTR]]([[S_INT_TY]]* [[S_ARR_PRIV_ITEM]]) + +// CHECK: ret void + +#endif diff --git a/test/OpenMP/distribute_parallel_for_if_codegen.cpp b/test/OpenMP/distribute_parallel_for_if_codegen.cpp new file mode 100644 index 0000000..e6cd721 --- /dev/null +++ b/test/OpenMP/distribute_parallel_for_if_codegen.cpp @@ -0,0 +1,192 @@ +// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple %itanium_abi_triple -emit-llvm %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple %itanium_abi_triple -emit-pch -o %t %s +// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple %itanium_abi_triple -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix=CHECK %s +// expected-no-diagnostics +#ifndef HEADER +#define HEADER + +void fn1(); +void fn2(); +void fn3(); +void fn4(); +void fn5(); +void fn6(); + +int Arg; + +// CHECK-LABEL: define {{.*}}void @{{.+}}gtid_test +void gtid_test() { +#pragma omp target +#pragma omp teams +// CHECK: call i{{[0-9]+}} @__tgt_target_teams( +// CHECK: call void [[OFFLOADING_FUN_0:@.+]]( +// CHECK: call i{{[0-9]+}} @__tgt_target_teams( +// CHECK: call void [[OFFLOADING_FUN_1:@.+]]( +#pragma omp distribute parallel for + for(int i = 0 ; i < 100; i++) {} + // CHECK: define internal void [[OFFLOADING_FUN_0]]( + // CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 0, {{.+}}* [[OMP_TEAMS_OUTLINED_0:@.+]] to {{.+}}) + // CHECK: define{{.+}} void [[OMP_TEAMS_OUTLINED_0]]( + // CHECK: call void @__kmpc_for_static_init_4( + // CHECK: 
call void {{.+}} @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{.+}} 2, {{.+}}* [[OMP_OUTLINED_0:@.+]] to void + // CHECK: call void @__kmpc_for_static_fini( + + // CHECK: define{{.+}} void [[OMP_OUTLINED_0]]( + // CHECK: call void @__kmpc_for_static_init_4( + // CHECK: call void @__kmpc_for_static_fini( + // CHECK: ret +#pragma omp target +#pragma omp teams +#pragma omp distribute parallel for if (parallel: false) + for(int i = 0 ; i < 100; i++) { + // CHECK: define internal void [[OFFLOADING_FUN_1]]( + // CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 0, {{.+}}* [[OMP_TEAMS_OUTLINED_1:@.+]] to {{.+}}) + // CHECK: define{{.+}} void [[OMP_TEAMS_OUTLINED_1]]( + // CHECK: call void @__kmpc_for_static_init_4( + // CHECK: call void @__kmpc_serialized_parallel( + // CHECK: call void [[OMP_OUTLINED_1:@.+]]( + // CHECK: call void @__kmpc_end_serialized_parallel( + // CHECK: call void @__kmpc_for_static_fini( + // CHECK: define{{.+}} void [[OMP_OUTLINED_1]]( + // CHECK: call void @__kmpc_for_static_init_4( + // CHECK: call void @{{.+}}gtid_test + // CHECK: call void @__kmpc_for_static_fini( + // CHECK: ret + gtid_test(); + } +} + + +template <typename T> +int tmain(T Arg) { +#pragma omp target +#pragma omp teams +#pragma omp distribute parallel for if (true) + for(int i = 0 ; i < 100; i++) { + fn1(); + } +#pragma omp target +#pragma omp teams +#pragma omp distribute parallel for if (false) + for(int i = 0 ; i < 100; i++) { + fn2(); + } +#pragma omp target +#pragma omp teams +#pragma omp distribute parallel for if (parallel: Arg) + for(int i = 0 ; i < 100; i++) { + fn3(); + } + return 0; +} + +// CHECK-LABEL: define {{.*}}i{{[0-9]+}} @main() +int main() { +// CHECK: call i{{[0-9]+}} @__tgt_target_teams( +// CHECK: call void [[OFFLOADING_FUN_0:@.+]]( +// CHECK: call i{{[0-9]+}} @__tgt_target_teams( +// CHECK: call void [[OFFLOADING_FUN_1:@.+]]( +// CHECK: call i{{[0-9]+}} @__tgt_target_teams( +// CHECK: call void [[OFFLOADING_FUN_2:@.+]]( +// CHECK: = call {{.*}}i{{.+}} 
@{{.+}}tmain +#pragma omp target +#pragma omp teams +#pragma omp distribute parallel for if (true) + for(int i = 0 ; i < 100; i++) { + // CHECK: define internal void [[OFFLOADING_FUN_0]]( + // CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 0, {{.+}}* [[OMP_TEAMS_OUTLINED_0:@.+]] to {{.+}}) + // CHECK: define{{.+}} void [[OMP_TEAMS_OUTLINED_0]]( + + // CHECK: call void @__kmpc_for_static_init_4( + // CHECK: call void {{.+}} @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{.+}} 2, {{.+}}* [[OMP_OUTLINED_2:@.+]] to void + // CHECK: call void @__kmpc_for_static_fini( + // CHECK: define{{.+}} void [[OMP_OUTLINED_2]]( + // CHECK: call void @__kmpc_for_static_init_4( + // CHECK: call {{.*}}void @{{.+}}fn4 + // CHECK: call void @__kmpc_for_static_fini( + + fn4(); + } + +#pragma omp target +#pragma omp teams +#pragma omp distribute parallel for if (false) + for(int i = 0 ; i < 100; i++) { + // CHECK: define internal void [[OFFLOADING_FUN_1]]( + // CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 0, {{.+}}* [[OMP_TEAMS_OUTLINED_1:@.+]] to {{.+}}) + // CHECK: define{{.+}} void [[OMP_TEAMS_OUTLINED_1]]( + + // CHECK: call void @__kmpc_for_static_init_4( + // CHECK: call void @__kmpc_serialized_parallel( + // CHECK: call void [[OMP_OUTLINED_3:@.+]]( + // CHECK: call void @__kmpc_end_serialized_parallel( + // CHECK: call void @__kmpc_for_static_fini( + + // CHECK: define{{.+}} void [[OMP_OUTLINED_3]]( + // CHECK: call void @__kmpc_for_static_init_4( + // CHECK: call {{.*}}void @{{.+}}fn5 + // CHECK: call void @__kmpc_for_static_fini( + fn5(); + } + +#pragma omp target +#pragma omp teams +#pragma omp distribute parallel for if (Arg) + for(int i = 0 ; i < 100; i++) { + // CHECK: define internal void [[OFFLOADING_FUN_2]]( + // CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 1, {{.+}}* [[OMP_TEAMS_OUTLINED_2:@.+]] to {{.+}}) + // CHECK: define{{.+}} void [[OMP_TEAMS_OUTLINED_2]]( + + // CHECK: call void @__kmpc_for_static_init_4( + // CHECK: call void 
{{.+}} @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{.+}} 2, {{.+}}* [[OMP_OUTLINED_4:@.+]] to void + // CHECK: call void @__kmpc_serialized_parallel( + // CHECK: call void [[OMP_OUTLINED_4:@.+]]( + // CHECK: call void @__kmpc_end_serialized_parallel( + // CHECK: call void @__kmpc_for_static_fini( + + // CHECK: define{{.+}} void [[OMP_OUTLINED_4]]( + // CHECK: call void @__kmpc_for_static_init_4( + // CHECK: call {{.*}}void @{{.+}}fn6 + // CHECK: call void @__kmpc_for_static_fini( + fn6(); + } + + return tmain(Arg); +} + +// CHECK-LABEL: define {{.+}} @{{.+}}tmain + +// CHECK: call void @__kmpc_for_static_init_4( +// CHECK: call {{.*}}void {{.+}} @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{.+}} 2, void {{.+}}* [[T_OUTLINE_FUN_1:@.+]] to void +// CHECK: call void @__kmpc_for_static_fini( + +// CHECK: define internal {{.*}}void [[T_OUTLINE_FUN_1]] +// CHECK: call void @__kmpc_for_static_init_4( +// CHECK: call {{.*}}void @{{.+}}fn1 +// CHECK: call void @__kmpc_for_static_fini( +// CHECK: ret void + +// CHECK: call void @__kmpc_for_static_init_4( +// CHECK: call {{.*}}void @__kmpc_serialized_parallel( +// CHECK: call void [[T_OUTLINE_FUN_2:@.+]]( +// CHECK: call {{.*}}void @__kmpc_end_serialized_parallel( +// CHECK: call void @__kmpc_for_static_fini( + +// CHECK: define internal {{.*}}void [[T_OUTLINE_FUN_2]] +// CHECK: call void @__kmpc_for_static_init_4( +// CHECK: call {{.*}}void @{{.+}}fn2 +// CHECK: call void @__kmpc_for_static_fini( +// CHECK: ret void + +// CHECK: call void @__kmpc_for_static_init_4( +// CHECK: call {{.*}}void {{.+}} @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{.+}} 2, void {{.+}}* [[T_OUTLINE_FUN_3:@.+]] to void +// CHECK: call {{.*}}void @__kmpc_serialized_parallel( +// call void [[T_OUTLINE_FUN_3:@.+]]( +// CHECK: call {{.*}}void @__kmpc_end_serialized_parallel( + +// CHECK: define internal {{.*}}void [[T_OUTLINE_FUN_3]] +// CHECK: call void @__kmpc_for_static_init_4( +// CHECK: call {{.*}}void @{{.+}}fn3 +// CHECK: call void @__kmpc_for_static_fini( +// 
CHECK: ret void +#endif diff --git a/test/OpenMP/distribute_parallel_for_lastprivate_codegen.cpp b/test/OpenMP/distribute_parallel_for_lastprivate_codegen.cpp new file mode 100644 index 0000000..2e8da79 --- /dev/null +++ b/test/OpenMP/distribute_parallel_for_lastprivate_codegen.cpp @@ -0,0 +1,653 @@ +// RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-64 +// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s +// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-64 +// RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-32 +// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s +// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-32 + +// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64 +// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s +// RUN: %clang_cc1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 
-include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64 +// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32 +// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s +// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32 +// expected-no-diagnostics +#ifndef HEADER +#define HEADER + +template +struct S { + T f; + S(T a) : f(a) {} + S() : f() {} + operator T() { return T(); } + ~S() {} +}; + +// CHECK: [[S_FLOAT_TY:%.+]] = type { float } +// CHECK: [[S_INT_TY:%.+]] = type { i{{[0-9]+}} } +template +T tmain() { + S test; + T t_var = T(); + T vec[] = {1, 2}; + S s_arr[] = {1, 2}; + S &var = test; + #pragma omp target + #pragma omp teams +#pragma omp distribute parallel for lastprivate(t_var, vec, s_arr, s_arr, var, var) + for (int i = 0; i < 2; ++i) { + vec[i] = t_var; + s_arr[i] = var; + } + return T(); +} + +int main() { + static int svar; + volatile double g; + volatile double &g1 = g; + + #ifdef LAMBDA + // LAMBDA-LABEL: @main + // LAMBDA: call{{.*}} void [[OUTER_LAMBDA:@.+]]( + [&]() { + static float sfvar; + // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]]( + // LAMBDA: call i{{[0-9]+}} @__tgt_target_teams( + // LAMBDA: call void [[OFFLOADING_FUN:@.+]]( + + // LAMBDA: define{{.+}} void [[OFFLOADING_FUN]]( + // LAMBDA: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 4, {{.+}}* [[OMP_OUTLINED:@.+]] to {{.+}}) + #pragma omp target + #pragma omp teams +#pragma omp distribute parallel for lastprivate(g, g1, svar, sfvar) + for (int i = 0; i < 2; ++i) { + // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_OUTLINED]](i32* 
noalias %{{.+}}, i32* noalias %{{.+}}, double*{{.+}} [[G_IN:%.+]], double*{{.+}} [[G1_IN:%.+]], i{{[0-9]+}}*{{.+}} [[SVAR_IN:%.+]], float*{{.+}} [[SFVAR_IN:%.+]]) + // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca double*, + // LAMBDA: [[G1_PRIVATE_ADDR:%.+]] = alloca double*, + // LAMBDA: [[SVAR_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}*, + // LAMBDA: [[SFVAR_PRIVATE_ADDR:%.+]] = alloca float*, + // LAMBDA: [[TMP_G1:%.+]] = alloca double*, + // loop variables + // LAMBDA: {{.+}} = alloca i{{[0-9]+}}, + // LAMBDA: {{.+}} = alloca i{{[0-9]+}}, + // LAMBDA: {{.+}} = alloca i{{[0-9]+}}, + // LAMBDA: {{.+}} = alloca i{{[0-9]+}}, + // LAMBDA: [[OMP_IS_LAST:%.+]] = alloca i{{[0-9]+}}, + // LAMBDA: [[G_PRIVATE:%.+]] = alloca double, + // LAMBDA: [[G1_PRIVATE:%.+]] = alloca double, + // LAMBDA: [[TMP_G1_PRIVATE:%.+]] = alloca double*, + // LAMBDA: [[SVAR_PRIVATE:%.+]] = alloca i{{[0-9]+}}, + // LAMBDA: [[SFVAR_PRIVATE:%.+]] = alloca float, + + // init addr alloca's + // LAMBDA: store double* [[G_IN]], double** [[G_PRIVATE_ADDR]], + // LAMBDA: store double* [[G1_IN]], double** [[G1_PRIVATE_ADDR]], + // LAMBDA: store i{{[0-9]+}}* [[SVAR_IN]], i{{[0-9]+}}** [[SVAR_PRIVATE_ADDR]], + // LAMBDA: store float* [[SFVAR_IN]], float** [[SFVAR_PRIVATE_ADDR]], + + // init private variables + // LAMBDA: [[G_IN_REF:%.+]] = load double*, double** [[G_PRIVATE_ADDR]], + // LAMBDA: [[SVAR_IN_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SVAR_PRIVATE_ADDR]], + // LAMBDA: [[SFVAR_IN_REF:%.+]] = load float*, float** [[SFVAR_PRIVATE_ADDR]], + // LAMBDA: [[G1_IN_REF:%.+]] = load double*, double** [[G1_PRIVATE_ADDR]], + // LAMBDA: store double* [[G1_IN_REF]], double** [[TMP_G1]], + // LAMBDA: [[TMP_G1_VAL:%.+]] = load double*, double** [[TMP_G1]], + // LAMBDA: store double* [[G1_PRIVATE]], double** [[TMP_G1_PRIVATE]], + + // LAMBDA: call {{.*}}void @__kmpc_for_static_init_4( + // LAMBDA: [[G1_PAR:%.+]] = load{{.+}}, {{.+}} [[TMP_G1_PRIVATE]], + // LAMBDA-64: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, 
{{.+}}[[OMP_PARFOR_OUTLINED:@.+]] to void ({{.+}})*), {{.+}}, {{.+}}, {{.+}} [[G_PRIVATE]], {{.+}} [[G1_PAR]], {{.+}} [[SVAR_PRIVATE]], {{.+}} [[SFVAR_PRIVATE]]) + // LAMBDA-32: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED:@.+]] to void ({{.+}})*), {{.+}}, {{.+}}, {{.+}} [[G_PRIVATE]], {{.+}} [[G1_PAR]], {{.+}} [[SVAR_PRIVATE]], {{.+}} [[SFVAR_PRIVATE]]) + // LAMBDA: call {{.*}}void @__kmpc_for_static_fini( + + // lastprivate + // LAMBDA: [[OMP_IS_LAST_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[OMP_IS_LAST]], + // LAMBDA: [[IS_LAST_IT:%.+]] = icmp ne i{{[0-9]+}} [[OMP_IS_LAST_VAL]], 0 + // LAMBDA: br i1 [[IS_LAST_IT]], label %[[OMP_LASTPRIV_BLOCK:.+]], label %[[OMP_LASTPRIV_DONE:.+]] + + // LAMBDA: [[OMP_LASTPRIV_BLOCK]]: + // LAMBDA: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE]], + // LAMBDA: store{{.*}} double [[G_PRIV_VAL]], double* [[G_IN_REF]], + // LAMBDA: [[TMP_G1_PRIV_REF:%.+]] = load double*, double** [[TMP_G1_PRIVATE]], + // LAMBDA: [[TMP_G1_PRIV_VAL:%.+]] = load double, double* [[TMP_G1_PRIV_REF]], + // LAMBDA: store{{.*}} double [[TMP_G1_PRIV_VAL]], double* [[TMP_G1_VAL]], + + // LAMBDA: [[SVAR_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SVAR_PRIVATE]], + // LAMBDA: store i{{[0-9]+}} [[SVAR_PRIV_VAL]], i{{[0-9]+}}* [[SVAR_IN_REF]], + // LAMBDA: [[SFVAR_PRIV_VAL:%.+]] = load float, float* [[SFVAR_PRIVATE]], + // LAMBDA: store float [[SFVAR_PRIV_VAL]], float* [[SFVAR_IN_REF]], + // LAMBDA: br label %[[OMP_LASTPRIV_DONE]] + // LAMBDA: [[OMP_LASTPRIV_DONE]]: + // LAMBDA: ret + + g = 1; + g1 = 1; + svar = 3; + sfvar = 4.0; + // outlined function for 'parallel for' + // LAMBDA-64: define{{.+}} void [[OMP_PARFOR_OUTLINED]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, {{.+}}, {{.+}}, {{.+}} [[G_IN:%.+]], {{.+}} [[G1_IN:%.+]], {{.+}} [[SVAR_IN:%.+]], {{.+}} [[SFVAR_IN:%.+]]) + // LAMBDA-32: define{{.+}} void [[OMP_PARFOR_OUTLINED]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, {{.+}}, {{.+}}, {{.+}} [[G_IN:%.+]], {{.+}} 
[[G1_IN:%.+]], {{.+}} [[SVAR_IN:%.+]], {{.+}} [[SFVAR_IN:%.+]]) + + // addr alloca's + // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca double*, + // LAMBDA: [[G1_PRIVATE_ADDR:%.+]] = alloca double*, + // LAMBDA: [[SVAR_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}*, + // LAMBDA: [[SFVAR_PRIVATE_ADDR:%.+]] = alloca float*, + + // loop variables + // LAMBDA: {{.+}} = alloca i{{[0-9]+}}, + // LAMBDA: {{.+}} = alloca i{{[0-9]+}}, + // LAMBDA: {{.+}} = alloca i{{[0-9]+}}, + // LAMBDA: {{.+}} = alloca i{{[0-9]+}}, + + // private alloca's + // LAMBDA: [[OMP_IS_LAST:%.+]] = alloca i{{[0-9]+}}, + // LAMBDA: [[G_PRIVATE:%.+]] = alloca double, + // LAMBDA: [[G1_PRIVATE:%.+]] = alloca double, + // LAMBDA: [[TMP_G1_PRIVATE:%.+]] = alloca double*, + // LAMBDA: [[SVAR_PRIVATE:%.+]] = alloca i{{[0-9]+}}, + // LAMBDA: [[SFVAR_PRIVATE:%.+]] = alloca float, + + // init addr alloca's + // LAMBDA: store double* [[G_IN]], double** [[G_PRIVATE_ADDR]], + // LAMBDA: store double* [[G1_IN]], double** [[G1_PRIVATE_ADDR]], + // LAMBDA: store i{{[0-9]+}}* [[SVAR_IN]], i{{[0-9]+}}** [[SVAR_PRIVATE_ADDR]], + // LAMBDA: store float* [[SFVAR_IN]], float** [[SFVAR_PRIVATE_ADDR]], + + // init private variables + // LAMBDA: [[G_IN_REF:%.+]] = load double*, double** [[G_PRIVATE_ADDR]], + // LAMBDA: [[SVAR_IN_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SVAR_PRIVATE_ADDR]], + // LAMBDA: [[SFVAR_IN_REF:%.+]] = load float*, float** [[SFVAR_PRIVATE_ADDR]], + + // LAMBDA: [[G1_IN_REF:%.+]] = load double*, double** [[G1_PRIVATE_ADDR]], + // LAMBDA: store double* [[G1_PRIVATE]], double** [[TMP_G1]], + + // LAMBDA: call {{.*}}void @__kmpc_for_static_init_4( + + // loop body + // LAMBDA: store double 1.0{{.+}}, double* [[G_PRIVATE]], + // LAMBDA: [[TMP_G1_REF:%.+]] = load double*, double** [[TMP_G1_PRIVATE]], + // LAMBDA: store{{.+}} double 1.0{{.+}}, double* [[TMP_G1_REF]], + // LAMBDA: store i{{[0-9]+}} 3, i{{[0-9]+}}* [[SVAR_PRIVATE]], + // LAMBDA: store float 4.0{{.+}}, float* [[SFVAR_PRIVATE]], + // LAMBDA: 
[[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 + // LAMBDA: store double* [[G_PRIVATE]], double** [[G_PRIVATE_ADDR_REF]], + // LAMBDA: [[TMP_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 + // LAMBDA: [[G1_PRIVATE_ADDR_FROM_TMP:%.+]] = load double*, double** [[TMP_G1_PRIVATE]], + // LAMBDA: store double* [[G1_PRIVATE_ADDR_FROM_TMP]], double** [[TMP_PRIVATE_ADDR_REF]], + // LAMBDA: [[SVAR_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 2 + // LAMBDA: store i{{[0-9]+}}* [[SVAR_PRIVATE]], i{{[0-9]+}}** [[SVAR_PRIVATE_ADDR_REF]] + // LAMBDA: [[SFVAR_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 3 + // LAMBDA: store float* [[SFVAR_PRIVATE]], float** [[SFVAR_PRIVATE_ADDR_REF]] + // LAMBDA: call{{.*}} void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]]) + + // LAMBDA: call {{.*}}void @__kmpc_for_static_fini( + + // lastprivate + // LAMBDA: [[OMP_IS_LAST_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[OMP_IS_LAST]], + // LAMBDA: [[IS_LAST_IT:%.+]] = icmp ne i{{[0-9]+}} [[OMP_IS_LAST_VAL]], 0 + // LAMBDA: br i1 [[IS_LAST_IT]], label %[[OMP_LASTPRIV_BLOCK:.+]], label %[[OMP_LASTPRIV_DONE:.+]] + // LAMBDA: [[OMP_LASTPRIV_BLOCK]]: + // LAMBDA: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE]], + // LAMBDA: store{{.*}} double [[G_PRIV_VAL]], double* [[G_IN_REF]], + // LAMBDA: [[TMP_G1_PRIV_REF:%.+]] = load double*, double** [[TMP_G1_PRIVATE]], + // LAMBDA: [[TMP_G1_PRIV_VAL:%.+]] = load double, double* [[TMP_G1_PRIV_REF]], + // LAMBDA: store{{.*}} double [[TMP_G1_PRIV_VAL]], double* [[G1_IN_REF]], + // LAMBDA: [[SVAR_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SVAR_PRIVATE]], + // LAMBDA: store i{{[0-9]+}} [[SVAR_PRIV_VAL]], i{{[0-9]+}}* [[SVAR_IN_REF]], + // LAMBDA: [[SFVAR_PRIV_VAL:%.+]] = load float, float* [[SFVAR_PRIVATE]], + // 
LAMBDA: store float [[SFVAR_PRIV_VAL]], float* [[SFVAR_IN_REF]], + // LAMBDA: br label %[[OMP_LASTPRIV_DONE]] + // LAMBDA: [[OMP_LASTPRIV_DONE]]: + // LAMBDA: ret + + [&]() { + // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]]) + // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]], + g = 2; + g1 = 2; + svar = 4; + sfvar = 8.0; + // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]] + // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 + // LAMBDA: [[G_REF:%.+]] = load double*, double** [[G_PTR_REF]] + // LAMBDA: store double 2.0{{.+}}, double* [[G_REF]] + + // LAMBDA: [[TMP_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 + // LAMBDA: [[G1_REF:%.+]] = load double*, double** [[TMP_PTR_REF]] + // LAMBDA: store double 2.0{{.+}}, double* [[G1_REF]], + // LAMBDA: [[SVAR_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 2 + // LAMBDA: [[SVAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SVAR_PTR_REF]] + // LAMBDA: store i{{[0-9]+}} 4, i{{[0-9]+}}* [[SVAR_REF]] + // LAMBDA: [[SFVAR_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 3 + // LAMBDA: [[SFVAR_REF:%.+]] = load float*, float** [[SFVAR_PTR_REF]] + // LAMBDA: store float 8.0{{.+}}, float* [[SFVAR_REF]] + }(); + } + }(); + return 0; + #else + S test; + int t_var = 0; + int vec[] = {1, 2}; + S s_arr[] = {1, 2}; + S &var = test; + + #pragma omp target + #pragma omp teams +#pragma omp distribute parallel for lastprivate(t_var, vec, s_arr, s_arr, var, var, svar) + for (int i = 0; i < 2; ++i) { + vec[i] = t_var; + s_arr[i] = var; + } + int i; + + return tmain(); + #endif +} + +// CHECK: define{{.*}} i{{[0-9]+}} @main() +// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]], +// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]]) +// CHECK: call 
i{{[0-9]+}} @__tgt_target_teams( +// CHECK: call void [[OFFLOAD_FUN:@.+]](i{{[0-9]+}} {{.+}}, [2 x i{{[0-9]+}}]* {{.+}}, [2 x [[S_FLOAT_TY]]]* {{.+}}, [[S_FLOAT_TY]]* {{.+}}, i{{[0-9]+}} {{.+}}) +// CHECK: ret + +// CHECK: define{{.+}} [[OFFLOAD_FUN]](i{{[0-9]+}} {{.+}}, [2 x i{{[0-9]+}}]*{{.+}} {{.+}}, [2 x [[S_FLOAT_TY]]]*{{.+}} {{.+}}, [[S_FLOAT_TY]]*{{.+}} {{.+}}, i{{[0-9]+}} {{.+}}) +// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_teams( +// CHECK: ret +// +// CHECK: define internal void [[OMP_OUTLINED:@.+]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, i{{[0-9]+}}*{{.+}} [[T_VAR_IN:%.+]], [2 x i{{[0-9]+}}]*{{.+}} [[VEC_IN:%.+]], [2 x [[S_FLOAT_TY]]]*{{.+}} [[S_ARR_IN:%.+]], [[S_FLOAT_TY]]*{{.+}} [[VAR_IN:%.+]], i{{[0-9]+}}*{{.*}} [[S_VAR_IN:%.+]]) +// CHECK: {{.+}} = alloca i{{[0-9]+}}*, +// CHECK: {{.+}} = alloca i{{[0-9]+}}*, +// CHECK: [[T_VAR_ADDR:%.+]] = alloca i{{[0-9]+}}*, +// CHECK: [[VEC_ADDR:%.+]] = alloca [2 x i{{[0-9]+}}]*, +// CHECK: [[S_ARR_ADDR:%.+]] = alloca [2 x [[S_FLOAT_TY]]]*, +// CHECK: [[VAR_ADDR:%.+]] = alloca [[S_FLOAT_TY]]*, +// CHECK: [[SVAR_ADDR:%.+]] = alloca i{{[0-9]+}}*, +// CHECK: [[TMP:%.*]] = alloca [[S_FLOAT_TY]]*, +// skip loop variables +// CHECK: {{.+}} = alloca i{{[0-9]+}}, +// CHECK: {{.+}} = alloca i{{[0-9]+}}, +// CHECK: {{.+}} = alloca i{{[0-9]+}}, +// CHECK: {{.+}} = alloca i{{[0-9]+}}, +// CHECK: [[OMP_IS_LAST:%.+]] = alloca i{{[0-9]+}}, +// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}}, +// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}], +// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_FLOAT_TY]]], +// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_FLOAT_TY]], +// CHECK: [[TMP_PRIV:%.+]] = alloca [[S_FLOAT_TY]]*, +// CHECK: [[S_VAR_PRIV:%.+]] = alloca i{{[0-9]+}}, + +// copy from parameters to local address variables +// CHECK: store i{{[0-9]+}}* [[T_VAR_IN]], i{{[0-9]+}}** [[T_VAR_ADDR]], +// CHECK: store [2 x i{{[0-9]+}}]* [[VEC_IN]], [2 x 
i{{[0-9]+}}]** [[VEC_ADDR]], +// CHECK: store [2 x [[S_FLOAT_TY]]]* [[S_ARR_IN]], [2 x [[S_FLOAT_TY]]]** [[S_ARR_ADDR]], +// CHECK: store [[S_FLOAT_TY]]* [[VAR_IN]], [[S_FLOAT_TY]]** [[VAR_ADDR]], +// CHECK: store i{{[0-9]+}}* [[S_VAR_IN]], i{{[0-9]+}}** [[SVAR_ADDR]], + +// load content of local address variables +// CHECK: [[T_VAR_ADDR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[T_VAR_ADDR]], +// CHECK: [[VEC_ADDR_REF:%.+]] = load [2 x i{{[0-9]+}}]*, [2 x i{{[0-9]+}}]** [[VEC_ADDR]], +// CHECK: [[S_ARR_ADDR_REF:%.+]] = load [2 x [[S_FLOAT_TY]]]*, [2 x [[S_FLOAT_TY]]]** [[S_ARR_ADDR]], +// CHECK: [[SVAR_ADDR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SVAR_ADDR]], +// CHECK: [[VAR_ADDR_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** [[VAR_ADDR]], +// CHECK: store [[S_FLOAT_TY]]* [[VAR_ADDR_REF]], [[S_FLOAT_TY]]** [[TMP]], +// CHECK: store i{{[0-9]+}} 0, i{{[0-9]+}}* [[OMP_IS_LAST]], + +// call constructor for s_arr +// CHECK: [[S_ARR_BGN:%.+]] = getelementptr{{.+}} [2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* [[S_ARR_PRIV]], +// CHECK: [[S_ARR_END:%.+]] = getelementptr {{.+}} [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[S_ARR_BGN]], +// CHECK: br label %[[S_ARR_CST_LOOP:.+]] +// CHECK: [[S_ARR_CST_LOOP]]: +// CHECK: [[S_ARR_CTOR:%.+]] = phi {{.+}} +// CHECK: call void [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* [[S_ARR_CTOR]]) +// CHECK: [[S_ARR_NEXT:%.+]] = getelementptr {{.+}} [[S_ARR_CTOR]], +// CHECK: [[S_ARR_DONE:%.+]] = icmp {{.+}} [[S_ARR_NEXT]], [[S_ARR_END]] +// CHECK: br i1 [[S_ARR_DONE]], label %[[S_ARR_CST_END:.+]], label %[[S_ARR_CST_LOOP]] +// CHECK: [[S_ARR_CST_END]]: +// CHECK: [[TMP_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** [[TMP]], +// CHECK: call void [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]]) +// CHECK: store [[S_FLOAT_TY]]* [[VAR_PRIV]], [[S_FLOAT_TY]]** [[TMP_PRIV]], + +// the distribute loop +// CHECK: call void @__kmpc_for_static_init_4( +// CHECK: [[TMP_PRIV_VAL:%.+]] = load {{.+}}, {{.+}} [[TMP_PRIV]], +// CHECK-64: 
call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED:@.+]] to void ({{.+}})*), {{.+}}, {{.+}}, {{.+}} [[VEC_PRIV]], {{.+}} [[T_VAR_PRIV]], {{.+}} [[S_ARR_PRIV]], {{.+}} [[TMP_PRIV_VAL]], {{.+}} [[S_VAR_PRIV]]) +// CHECK-32: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED:@.+]] to void ({{.+}})*), {{.+}}, {{.+}}, {{.+}} [[VEC_PRIV]], {{.+}} [[T_VAR_PRIV]], {{.+}} [[S_ARR_PRIV]], {{.+}} [[TMP_PRIV_VAL]], {{.+}} [[S_VAR_PRIV]]) + +// CHECK: call void @__kmpc_for_static_fini( + +// lastprivates +// CHECK: [[OMP_IS_LAST_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[OMP_IS_LAST]], +// CHECK: [[IS_LAST_IT:%.+]] = icmp ne i{{[0-9]+}} [[OMP_IS_LAST_VAL]], 0 +// CHECK: br i1 [[IS_LAST_IT]], label %[[OMP_LASTPRIV_BLOCK:.+]], label %[[OMP_LASTPRIV_DONE:.+]] + +// CHECK: [[OMP_LASTPRIV_BLOCK]]: +// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]], +// CHECK: store i{{[0-9]+}} [[T_VAR_VAL]], i{{[0-9]+}}* [[T_VAR_ADDR_REF]], +// CHECK: [[BCAST_VEC_ADDR_REF:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_ADDR_REF]] to i8* +// CHECK: [[BCAST_VEC_PRIV:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV]] to i8* +// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[BCAST_VEC_ADDR_REF]], i8* [[BCAST_VEC_PRIV]], +// CHECK: [[S_ARR_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* [[S_ARR_ADDR_REF]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 +// CHECK: [[S_ARR_PRIV_BCAST:%.+]] = bitcast [2 x [[S_FLOAT_TY]]]* [[S_ARR_PRIV]] to [[S_FLOAT_TY]]* +// CHECK: [[S_ARR_BEGIN_GEP:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[S_ARR_BEGIN]], i{{[0-9]+}} 2 +// CHECK: [[S_ARR_IS_EMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[S_ARR_BEGIN]], [[S_ARR_BEGIN_GEP]] +// CHECK: br i1 [[S_ARR_IS_EMPTY]], label %[[S_ARR_COPY_DONE:.+]], label %[[S_ARR_COPY_BLOCK:.+]] +// CHECK: [[S_ARR_COPY_BLOCK]]: +// CHECK: [[S_ARR_SRC_EL:%.+]] = phi [[S_FLOAT_TY]]*{{.+}} +// CHECK: [[S_ARR_DST_EL:%.+]] = phi [[S_FLOAT_TY]]*{{.+}} +// CHECK: 
[[S_ARR_DST_BCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[S_ARR_DST_EL]] to i8* +// CHECK: [[S_ARR_SRC_BCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[S_ARR_SRC_EL]] to i8* +// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[S_ARR_DST_BCAST]], i8* [[S_ARR_SRC_BCAST]]{{.+}}) +// CHECK: [[S_ARR_DST_NEXT:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[S_ARR_DST_EL]], i{{[0-9]+}} 1 +// CHECK: [[S_ARR_SRC_NEXT:%.+]] = getelementptr{{.+}} +// CHECK: [[CPY_IS_FINISHED:%.+]] = icmp eq [[S_FLOAT_TY]]* [[S_ARR_DST_NEXT]], [[S_ARR_BEGIN_GEP]] +// CHECK: br i1 [[CPY_IS_FINISHED]], label %[[S_ARR_COPY_DONE]], label %[[S_ARR_COPY_BLOCK]] +// CHECK: [[S_ARR_COPY_DONE]]: +// CHECK: [[TMP_VAL1:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** [[TMP_PRIV]], +// CHECK: [[VAR_ADDR_REF_BCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[TMP_REF]] to i8* +// CHECK: [[TMP_VAL1_BCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[TMP_VAL1]] to i8* +// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[VAR_ADDR_REF_BCAST]], i8* [[TMP_VAL1_BCAST]],{{.+}}) +// CHECK: [[SVAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[S_VAR_PRIV]], +// CHECK: store i{{[0-9]+}} [[SVAR_VAL]], i{{[0-9]+}}* [[SVAR_ADDR_REF]], +// CHECK: ret void + +// outlined function for 'parallel for' +// CHECK-64: define{{.+}} void [[OMP_PARFOR_OUTLINED]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, {{.+}}, {{.+}}, {{.+}} [[VEC_IN:%.+]], {{.+}} [[T_VAR_IN:%.+]], {{.+}} [[S_ARR_IN:%.+]], {{.+}} [[VAR_IN:%.+]], {{.+}} [[SVAR_IN:%.+]]) +// CHECK-32: define{{.+}} void [[OMP_PARFOR_OUTLINED]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, {{.+}}, {{.+}}, {{.+}} [[VEC_IN:%.+]], {{.+}} [[T_VAR_IN:%.+]], {{.+}} [[S_ARR_IN:%.+]], {{.+}} [[VAR_IN:%.+]], {{.+}} [[SVAR_IN:%.+]]) + +// CHECK: {{.+}} = alloca i{{[0-9]+}}*, +// CHECK: {{.+}} = alloca i{{[0-9]+}}*, +// CHECK: [[VEC_ADDR:%.+]] = alloca [2 x i{{[0-9]+}}]*, +// CHECK: [[T_VAR_ADDR:%.+]] = alloca i{{[0-9]+}}*, +// CHECK: [[S_ARR_ADDR:%.+]] = alloca [2 x [[S_FLOAT_TY]]]*, +// CHECK: [[VAR_ADDR:%.+]] = alloca [[S_FLOAT_TY]]*, +// 
CHECK: [[SVAR_ADDR:%.+]] = alloca i{{[0-9]+}}*, +// skip loop variables +// CHECK: {{.+}} = alloca i{{[0-9]+}}, +// CHECK: {{.+}} = alloca i{{[0-9]+}}, +// CHECK: {{.+}} = alloca i{{[0-9]+}}, +// CHECK: {{.+}} = alloca i{{[0-9]+}}, +// CHECK: [[OMP_IS_LAST:%.+]] = alloca i{{[0-9]+}}, +// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}}, +// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}], +// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_FLOAT_TY]]], +// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_FLOAT_TY]], +// CHECK: [[TMP_PRIV:%.+]] = alloca [[S_FLOAT_TY]]*, +// CHECK: [[S_VAR_PRIV:%.+]] = alloca i{{[0-9]+}}, + +// copy from parameters to local address variables +// CHECK: store [2 x i{{[0-9]+}}]* [[VEC_IN]], [2 x i{{[0-9]+}}]** [[VEC_ADDR]], +// CHECK: store i{{[0-9]+}}* [[T_VAR_IN]], i{{[0-9]+}}** [[T_VAR_ADDR]], +// CHECK: store [2 x [[S_FLOAT_TY]]]* [[S_ARR_IN]], [2 x [[S_FLOAT_TY]]]** [[S_ARR_ADDR]], +// CHECK: store [[S_FLOAT_TY]]* [[VAR_IN]], [[S_FLOAT_TY]]** [[VAR_ADDR]], +// CHECK: store i{{[0-9]+}}* [[S_VAR_IN]], i{{[0-9]+}}** [[SVAR_ADDR]], + +// load content of local address variables +// CHECK: [[VEC_ADDR_REF:%.+]] = load [2 x i{{[0-9]+}}]*, [2 x i{{[0-9]+}}]** [[VEC_ADDR]], +// CHECK: [[T_VAR_ADDR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[T_VAR_ADDR]], +// CHECK: [[S_ARR_ADDR_REF:%.+]] = load [2 x [[S_FLOAT_TY]]]*, [2 x [[S_FLOAT_TY]]]** [[S_ARR_ADDR]], +// CHECK: [[SVAR_ADDR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SVAR_ADDR]], +// CHECK: store i{{[0-9]+}} 0, i{{[0-9]+}}* [[OMP_IS_LAST]], + +// call constructor for s_arr +// CHECK: [[S_ARR_BGN:%.+]] = getelementptr{{.+}} [2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* [[S_ARR_PRIV]], +// CHECK: [[S_ARR_END:%.+]] = getelementptr {{.+}} [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[S_ARR_BGN]], +// CHECK: br label %[[S_ARR_CST_LOOP:.+]] +// CHECK: [[S_ARR_CST_LOOP]]: +// CHECK: [[S_ARR_CTOR:%.+]] = phi {{.+}} +// CHECK: call void [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* [[S_ARR_CTOR]]) +// CHECK: 
[[S_ARR_NEXT:%.+]] = getelementptr {{.+}} [[S_ARR_CTOR]], +// CHECK: [[S_ARR_DONE:%.+]] = icmp {{.+}} [[S_ARR_NEXT]], [[S_ARR_END]] +// CHECK: br i1 [[S_ARR_DONE]], label %[[S_ARR_CST_END:.+]], label %[[S_ARR_CST_LOOP]] +// CHECK: [[S_ARR_CST_END]]: +// CHECK: [[VAR_ADDR_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** [[VAR_ADDR]], +// CHECK: call void [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]]) +// CHECK: store [[S_FLOAT_TY]]* [[VAR_PRIV]], [[S_FLOAT_TY]]** [[TMP_PRIV]], + +// CHECK: call void @__kmpc_for_static_init_4( + +// loop body +// assignment: vec[i] = t_var; +// CHECK: [[T_VAR_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]], +// CHECK: [[VEC_PTR:%.+]] = getelementptr inbounds [2 x i{{[0-9]+}}], [2 x i{{[0-9]+}}]* [[VEC_PRIV]], i{{[0-9]+}} 0, i{{[0-9]+}} {{.+}} +// CHECK: store i{{[0-9]+}} [[T_VAR_PRIV_VAL]], i{{[0-9]+}}* [[VEC_PTR]], + +// assignment: s_arr[i] = var; +// CHECK-DAG: [[S_ARR_PTR:%.+]] = getelementptr inbounds [2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* [[S_ARR_PRIV]], +// CHECK-DAG: [[TMP_VAL:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** [[TMP_PRIV]], +// CHECK-DAG: [[S_ARR_PTR_BCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[S_ARR_PTR]] to i8* +// CHECK-DAG: [[TMP_VAL_BCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[TMP_VAL]] to i8* +// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[S_ARR_PTR_BCAST]], i8* [[TMP_VAL_BCAST]], + +// CHECK: call void @__kmpc_for_static_fini( + +// lastprivates +// CHECK: [[OMP_IS_LAST_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[OMP_IS_LAST]], +// CHECK: [[IS_LAST_IT:%.+]] = icmp ne i{{[0-9]+}} [[OMP_IS_LAST_VAL]], 0 +// CHECK: br i1 [[IS_LAST_IT]], label %[[OMP_LASTPRIV_BLOCK:.+]], label %[[OMP_LASTPRIV_DONE:.+]] + +// CHECK: [[OMP_LASTPRIV_BLOCK]]: +// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]], +// CHECK: store i{{[0-9]+}} [[T_VAR_VAL]], i{{[0-9]+}}* [[T_VAR_ADDR_REF]], +// CHECK: [[BCAST_VEC_ADDR_REF:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_ADDR_REF]] to i8* +// 
CHECK: [[BCAST_VEC_PRIV:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV]] to i8* +// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[BCAST_VEC_ADDR_REF]], i8* [[BCAST_VEC_PRIV]], +// CHECK: [[S_ARR_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* [[S_ARR_ADDR_REF]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 +// CHECK: [[S_ARR_PRIV_BCAST:%.+]] = bitcast [2 x [[S_FLOAT_TY]]]* [[S_ARR_PRIV]] to [[S_FLOAT_TY]]* +// CHECK: [[S_ARR_BEGIN_GEP:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[S_ARR_BEGIN]], i{{[0-9]+}} 2 +// CHECK: [[S_ARR_IS_EMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[S_ARR_BEGIN]], [[S_ARR_BEGIN_GEP]] +// CHECK: br i1 [[S_ARR_IS_EMPTY]], label %[[S_ARR_COPY_DONE:.+]], label %[[S_ARR_COPY_BLOCK:.+]] +// CHECK: [[S_ARR_COPY_BLOCK]]: +// CHECK: [[S_ARR_SRC_EL:%.+]] = phi [[S_FLOAT_TY]]*{{.+}} +// CHECK: [[S_ARR_DST_EL:%.+]] = phi [[S_FLOAT_TY]]*{{.+}} +// CHECK: [[S_ARR_DST_BCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[S_ARR_DST_EL]] to i8* +// CHECK: [[S_ARR_SRC_BCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[S_ARR_SRC_EL]] to i8* +// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[S_ARR_DST_BCAST]], i8* [[S_ARR_SRC_BCAST]]{{.+}}) +// CHECK: [[S_ARR_DST_NEXT:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[S_ARR_DST_EL]], i{{[0-9]+}} 1 +// CHECK: [[S_ARR_SRC_NEXT:%.+]] = getelementptr{{.+}} +// CHECK: [[CPY_IS_FINISHED:%.+]] = icmp eq [[S_FLOAT_TY]]* [[S_ARR_DST_NEXT]], [[S_ARR_BEGIN_GEP]] +// CHECK: br i1 [[CPY_IS_FINISHED]], label %[[S_ARR_COPY_DONE]], label %[[S_ARR_COPY_BLOCK]] +// CHECK: [[S_ARR_COPY_DONE]]: +// CHECK: [[TMP_VAL1:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** [[TMP_PRIV]], +// CHECK: [[VAR_ADDR_REF_BCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR_ADDR_REF]] to i8* +// CHECK: [[TMP_VAL1_BCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[TMP_VAL1]] to i8* +// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[VAR_ADDR_REF_BCAST]], i8* [[TMP_VAL1_BCAST]],{{.+}}) +// CHECK: [[SVAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[S_VAR_PRIV]], +// CHECK: store 
i{{[0-9]+}} [[SVAR_VAL]], i{{[0-9]+}}* [[SVAR_ADDR_REF]], +// CHECK: ret void + +// template tmain +// CHECK: define{{.*}} i{{[0-9]+}} [[TMAIN_INT:@.+]]() +// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]], +// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]]) +// CHECK: call i{{[0-9]+}} @__tgt_target_teams( +// CHECK: call void [[OFFLOAD_FUN_1:@.+]](i{{[0-9]+}} {{.+}}, [2 x i{{[0-9]+}}]* {{.+}}, [2 x [[S_INT_TY]]]* {{.+}}, [[S_INT_TY]]* {{.+}}) +// CHECK: ret + +// CHECK: define internal void [[OFFLOAD_FUN_1]]( +// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_teams(%{{.+}}* @{{.+}}, i{{[0-9]+}} 4, +// CHECK: ret + +// CHECK: define internal void [[OMP_OUTLINED_1:@.+]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, i{{[0-9]+}}*{{.+}} [[T_VAR_IN:%.+]], [2 x i{{[0-9]+}}]*{{.+}} [[VEC_IN:%.+]], [2 x [[S_INT_TY]]]*{{.+}} [[S_ARR_IN:%.+]], [[S_INT_TY]]*{{.+}} [[VAR_IN:%.+]]) +// skip alloca of global_tid and bound_tid +// CHECK: {{.+}} = alloca i{{[0-9]+}}*, +// CHECK: {{.+}} = alloca i{{[0-9]+}}*, +// CHECK: [[T_VAR_ADDR:%.+]] = alloca i{{[0-9]+}}*, +// CHECK: [[VEC_ADDR:%.+]] = alloca [2 x i{{[0-9]+}}]*, +// CHECK: [[S_ARR_ADDR:%.+]] = alloca [2 x [[S_INT_TY]]]*, +// CHECK: [[VAR_ADDR:%.+]] = alloca [[S_INT_TY]]*, +// CHECK: [[TMP:%.+]] = alloca [[S_INT_TY]]*, +// skip loop variables +// CHECK: {{.+}} = alloca i{{[0-9]+}}, +// CHECK: {{.+}} = alloca i{{[0-9]+}}, +// CHECK: {{.+}} = alloca i{{[0-9]+}}, +// CHECK: {{.+}} = alloca i{{[0-9]+}}, +// CHECK: [[OMP_IS_LAST:%.+]] = alloca i{{[0-9]+}}, +// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}}, +// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}], +// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_INT_TY]]], +// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]], +// CHECK: [[TMP_PRIV:%.+]] = alloca [[S_INT_TY]]*, + +// skip init of bound and global tid +// CHECK: store i{{[0-9]+}}* {{.*}}, +// CHECK: store i{{[0-9]+}}* {{.*}}, +// 
copy from parameters to local address variables +// CHECK: store i{{[0-9]+}}* [[T_VAR_IN]], i{{[0-9]+}}** [[T_VAR_ADDR]], +// CHECK: store [2 x i{{[0-9]+}}]* [[VEC_IN]], [2 x i{{[0-9]+}}]** [[VEC_ADDR]], +// CHECK: store [2 x [[S_INT_TY]]]* [[S_ARR_IN]], [2 x [[S_INT_TY]]]** [[S_ARR_ADDR]], +// CHECK: store [[S_INT_TY]]* [[VAR_IN]], [[S_INT_TY]]** [[VAR_ADDR]], + +// load content of local address variables +// CHECK: [[T_VAR_ADDR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[T_VAR_ADDR]], +// CHECK: [[VEC_ADDR_REF:%.+]] = load [2 x i{{[0-9]+}}]*, [2 x i{{[0-9]+}}]** [[VEC_ADDR]], +// CHECK: [[S_ARR_ADDR_REF:%.+]] = load [2 x [[S_INT_TY]]]*, [2 x [[S_INT_TY]]]** [[S_ARR_ADDR]], +// CHECK: [[VAR_ADDR_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[VAR_ADDR]], +// CHECK-DAG: store [[S_INT_TY]]* [[VAR_ADDR_REF]], [[S_INT_TY]]** [[TMP]], +// CHECK-DAG: store i{{[0-9]+}} 0, i{{[0-9]+}}* [[OMP_IS_LAST]], +// CHECK-DAG: [[TMP_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[TMP]], + +// CHECK: call void @__kmpc_for_static_init_4( +// CHECK: [[TMP_PRIV_VAL:%.+]] = load {{.+}}, {{.+}} [[TMP_PRIV]], +// CHECK-64: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED:@.+]] to void ({{.+}})*), {{.+}}, {{.+}}, {{.+}} [[VEC_PRIV]], {{.+}} [[T_VAR_PRIV]], {{.+}} [[S_ARR_PRIV]], {{.+}} [[TMP_PRIV_VAL]]) +// CHECK-32: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED:@.+]] to void ({{.+}})*), {{.+}}, {{.+}}, {{.+}} [[VEC_PRIV]], {{.+}} [[T_VAR_PRIV]], {{.+}} [[S_ARR_PRIV]], {{.+}} [[TMP_PRIV_VAL]]) + +// CHECK: call void @__kmpc_for_static_fini( + +// lastprivates +// CHECK: [[OMP_IS_LAST_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[OMP_IS_LAST]], +// CHECK: [[IS_LAST_IT:%.+]] = icmp ne i{{[0-9]+}} [[OMP_IS_LAST_VAL]], 0 +// CHECK: br i1 [[IS_LAST_IT]], label %[[OMP_LASTPRIV_BLOCK:.+]], label %[[OMP_LASTPRIV_DONE:.+]] + +// CHECK: [[OMP_LASTPRIV_BLOCK]]: +// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]], +// 
CHECK: store i{{[0-9]+}} [[T_VAR_VAL]], i{{[0-9]+}}* [[T_VAR_ADDR_REF]], +// CHECK: [[BCAST_VEC_ADDR_REF:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_ADDR_REF]] to i8* +// CHECK: [[BCAST_VEC_PRIV:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV]] to i8* +// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[BCAST_VEC_ADDR_REF]], i8* [[BCAST_VEC_PRIV]], +// CHECK: [[S_ARR_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* [[S_ARR_ADDR_REF]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 +// CHECK: [[S_ARR_PRIV_BCAST:%.+]] = bitcast [2 x [[S_INT_TY]]]* [[S_ARR_PRIV]] to [[S_INT_TY]]* +// CHECK: [[S_ARR_BEGIN_GEP:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_BEGIN]], i{{[0-9]+}} 2 +// CHECK: [[S_ARR_IS_EMPTY:%.+]] = icmp eq [[S_INT_TY]]* [[S_ARR_BEGIN]], [[S_ARR_BEGIN_GEP]] +// CHECK: br i1 [[S_ARR_IS_EMPTY]], label %[[S_ARR_COPY_DONE:.+]], label %[[S_ARR_COPY_BLOCK:.+]] +// CHECK: [[S_ARR_COPY_BLOCK]]: +// CHECK: [[S_ARR_SRC_EL:%.+]] = phi [[S_INT_TY]]*{{.+}} +// CHECK: [[S_ARR_DST_EL:%.+]] = phi [[S_INT_TY]]*{{.+}} +// CHECK: [[S_ARR_DST_BCAST:%.+]] = bitcast [[S_INT_TY]]* [[S_ARR_DST_EL]] to i8* +// CHECK: [[S_ARR_SRC_BCAST:%.+]] = bitcast [[S_INT_TY]]* [[S_ARR_SRC_EL]] to i8* +// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[S_ARR_DST_BCAST]], i8* [[S_ARR_SRC_BCAST]]{{.+}}) +// CHECK: [[S_ARR_DST_NEXT:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_DST_EL]], i{{[0-9]+}} 1 +// CHECK: [[S_ARR_SRC_NEXT:%.+]] = getelementptr{{.+}} +// CHECK: [[CPY_IS_FINISHED:%.+]] = icmp eq [[S_INT_TY]]* [[S_ARR_DST_NEXT]], [[S_ARR_BEGIN_GEP]] +// CHECK: br i1 [[CPY_IS_FINISHED]], label %[[S_ARR_COPY_DONE]], label %[[S_ARR_COPY_BLOCK]] +// CHECK: [[S_ARR_COPY_DONE]]: +// CHECK: [[TMP_VAL:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[TMP_PRIV]], +// CHECK: [[VAR_ADDR_REF_BCAST:%.+]] = bitcast [[S_INT_TY]]* [[TMP_REF]] to i8* +// CHECK: [[TMP_VAL_BCAST:%.+]] = bitcast [[S_INT_TY]]* [[TMP_VAL]] to i8* +// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[VAR_ADDR_REF_BCAST]], i8* 
[[TMP_VAL_BCAST]],{{.+}}) +// CHECK: ret void + +// outlined function for 'parallel for' +// CHECK-64: define{{.+}} void [[OMP_PARFOR_OUTLINED]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, {{.+}}, {{.+}}, {{.+}} [[VEC_IN:%.+]], {{.+}} [[T_VAR_IN:%.+]], {{.+}} [[S_ARR_IN:%.+]], {{.+}} [[VAR_IN:%.+]]) +// CHECK-32: define{{.+}} void [[OMP_PARFOR_OUTLINED]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, {{.+}}, {{.+}}, {{.+}} [[VEC_IN:%.+]], {{.+}} [[T_VAR_IN:%.+]], {{.+}} [[S_ARR_IN:%.+]], {{.+}} [[VAR_IN:%.+]]) + +// CHECK: {{.+}} = alloca i{{[0-9]+}}*, +// CHECK: {{.+}} = alloca i{{[0-9]+}}*, +// CHECK: [[VEC_ADDR:%.+]] = alloca [2 x i{{[0-9]+}}]*, +// CHECK: [[T_VAR_ADDR:%.+]] = alloca i{{[0-9]+}}*, +// CHECK: [[S_ARR_ADDR:%.+]] = alloca [2 x [[S_INT_TY]]]*, +// CHECK: [[VAR_ADDR:%.+]] = alloca [[S_INT_TY]]*, +// skip loop variables +// CHECK: {{.+}} = alloca i{{[0-9]+}}, +// CHECK: {{.+}} = alloca i{{[0-9]+}}, +// CHECK: {{.+}} = alloca i{{[0-9]+}}, +// CHECK: {{.+}} = alloca i{{[0-9]+}}, +// CHECK: [[OMP_IS_LAST:%.+]] = alloca i{{[0-9]+}}, +// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}}, +// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}], +// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_INT_TY]]], +// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]], +// CHECK: [[TMP_PRIV:%.+]] = alloca [[S_INT_TY]]*, + +// copy from parameters to local address variables +// CHECK: store [2 x i{{[0-9]+}}]* [[VEC_IN]], [2 x i{{[0-9]+}}]** [[VEC_ADDR]], +// CHECK: store i{{[0-9]+}}* [[T_VAR_IN]], i{{[0-9]+}}** [[T_VAR_ADDR]], +// CHECK: store [2 x [[S_INT_TY]]]* [[S_ARR_IN]], [2 x [[S_INT_TY]]]** [[S_ARR_ADDR]], +// CHECK: store [[S_INT_TY]]* [[VAR_IN]], [[S_INT_TY]]** [[VAR_ADDR]], + +// load content of local address variables +// CHECK: [[VEC_ADDR_REF:%.+]] = load [2 x i{{[0-9]+}}]*, [2 x i{{[0-9]+}}]** [[VEC_ADDR]], +// CHECK: [[T_VAR_ADDR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[T_VAR_ADDR]], +// CHECK: [[S_ARR_ADDR_REF:%.+]] = load [2 x [[S_INT_TY]]]*, [2 x 
[[S_INT_TY]]]** [[S_ARR_ADDR]], +// CHECK: store i{{[0-9]+}} 0, i{{[0-9]+}}* [[OMP_IS_LAST]], + +// call constructor for s_arr +// CHECK: [[S_ARR_BGN:%.+]] = getelementptr{{.+}} [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* [[S_ARR_PRIV]], +// CHECK: [[S_ARR_END:%.+]] = getelementptr {{.+}} [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_BGN]], +// CHECK: br label %[[S_ARR_CST_LOOP:.+]] +// CHECK: [[S_ARR_CST_LOOP]]: +// CHECK: [[S_ARR_CTOR:%.+]] = phi {{.+}} +// CHECK: call void [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[S_ARR_CTOR]]) +// CHECK: [[S_ARR_NEXT:%.+]] = getelementptr {{.+}} [[S_ARR_CTOR]], +// CHECK: [[S_ARR_DONE:%.+]] = icmp {{.+}} [[S_ARR_NEXT]], [[S_ARR_END]] +// CHECK: br i1 [[S_ARR_DONE]], label %[[S_ARR_CST_END:.+]], label %[[S_ARR_CST_LOOP]] +// CHECK: [[S_ARR_CST_END]]: +// CHECK: [[VAR_ADDR_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[VAR_ADDR]], +// CHECK: call void [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[VAR_PRIV]]) +// CHECK: store [[S_INT_TY]]* [[VAR_PRIV]], [[S_INT_TY]]** [[TMP_PRIV]], + +// CHECK: call void @__kmpc_for_static_init_4( + +// assignment: vec[i] = t_var; +// CHECK: [[IV_VAL:%.+]] = +// CHECK: [[T_VAR_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]], +// CHECK: [[VEC_PTR:%.+]] = getelementptr inbounds [2 x i{{[0-9]+}}], [2 x i{{[0-9]+}}]* [[VEC_PRIV]], i{{[0-9]+}} 0, i{{[0-9]+}} {{.+}} +// CHECK: store i{{[0-9]+}} [[T_VAR_PRIV_VAL]], i{{[0-9]+}}* [[VEC_PTR]], + +// assignment: s_arr[i] = var; +// CHECK-DAG: [[S_ARR_PTR:%.+]] = getelementptr inbounds [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* [[S_ARR_PRIV]], +// CHECK-DAG: [[TMP_VAL:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[TMP_PRIV]], +// CHECK-DAG: [[S_ARR_PTR_BCAST:%.+]] = bitcast [[S_INT_TY]]* [[S_ARR_PTR]] to i8* +// CHECK-DAG: [[TMP_VAL_BCAST:%.+]] = bitcast [[S_INT_TY]]* [[TMP_VAL]] to i8* +// CHECK-DAG: call void @llvm.memcpy.{{.+}}(i8* [[S_ARR_PTR_BCAST]], i8* [[TMP_VAL_BCAST]], + +// CHECK: call void @__kmpc_for_static_fini( + +// lastprivates +// CHECK: 
[[OMP_IS_LAST_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[OMP_IS_LAST]], +// CHECK: [[IS_LAST_IT:%.+]] = icmp ne i{{[0-9]+}} [[OMP_IS_LAST_VAL]], 0 +// CHECK: br i1 [[IS_LAST_IT]], label %[[OMP_LASTPRIV_BLOCK:.+]], label %[[OMP_LASTPRIV_DONE:.+]] + +// CHECK: [[OMP_LASTPRIV_BLOCK]]: +// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]], +// CHECK: store i{{[0-9]+}} [[T_VAR_VAL]], i{{[0-9]+}}* [[T_VAR_ADDR_REF]], +// CHECK: [[BCAST_VEC_ADDR_REF:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_ADDR_REF]] to i8* +// CHECK: [[BCAST_VEC_PRIV:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV]] to i8* +// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[BCAST_VEC_ADDR_REF]], i8* [[BCAST_VEC_PRIV]], +// CHECK: [[S_ARR_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* [[S_ARR_ADDR_REF]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 +// CHECK: [[S_ARR_PRIV_BCAST:%.+]] = bitcast [2 x [[S_INT_TY]]]* [[S_ARR_PRIV]] to [[S_INT_TY]]* +// CHECK: [[S_ARR_BEGIN_GEP:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_BEGIN]], i{{[0-9]+}} 2 +// CHECK: [[S_ARR_IS_EMPTY:%.+]] = icmp eq [[S_INT_TY]]* [[S_ARR_BEGIN]], [[S_ARR_BEGIN_GEP]] +// CHECK: br i1 [[S_ARR_IS_EMPTY]], label %[[S_ARR_COPY_DONE:.+]], label %[[S_ARR_COPY_BLOCK:.+]] +// CHECK: [[S_ARR_COPY_BLOCK]]: +// CHECK: [[S_ARR_SRC_EL:%.+]] = phi [[S_INT_TY]]*{{.+}} +// CHECK: [[S_ARR_DST_EL:%.+]] = phi [[S_INT_TY]]*{{.+}} +// CHECK: [[S_ARR_DST_BCAST:%.+]] = bitcast [[S_INT_TY]]* [[S_ARR_DST_EL]] to i8* +// CHECK: [[S_ARR_SRC_BCAST:%.+]] = bitcast [[S_INT_TY]]* [[S_ARR_SRC_EL]] to i8* +// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[S_ARR_DST_BCAST]], i8* [[S_ARR_SRC_BCAST]]{{.+}}) +// CHECK: [[S_ARR_DST_NEXT:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_DST_EL]], i{{[0-9]+}} 1 +// CHECK: [[S_ARR_SRC_NEXT:%.+]] = getelementptr{{.+}} +// CHECK: [[CPY_IS_FINISHED:%.+]] = icmp eq [[S_INT_TY]]* [[S_ARR_DST_NEXT]], [[S_ARR_BEGIN_GEP]] +// CHECK: br i1 [[CPY_IS_FINISHED]], label %[[S_ARR_COPY_DONE]], 
label %[[S_ARR_COPY_BLOCK]] +// CHECK: [[S_ARR_COPY_DONE]]: +// CHECK: [[TMP_VAL1:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[TMP_PRIV]], +// CHECK: [[VAR_ADDR_REF_BCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR_ADDR_REF]] to i8* +// CHECK: [[TMP_VAL1_BCAST:%.+]] = bitcast [[S_INT_TY]]* [[TMP_VAL1]] to i8* +// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[VAR_ADDR_REF_BCAST]], i8* [[TMP_VAL1_BCAST]],{{.+}}) +// CHECK: ret void + +#endif diff --git a/test/OpenMP/distribute_parallel_for_num_threads_codegen.cpp b/test/OpenMP/distribute_parallel_for_num_threads_codegen.cpp new file mode 100644 index 0000000..89e9905 --- /dev/null +++ b/test/OpenMP/distribute_parallel_for_num_threads_codegen.cpp @@ -0,0 +1,121 @@ +// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple %itanium_abi_triple -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s +// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple %itanium_abi_triple -fexceptions -fcxx-exceptions -emit-pch -o %t %s +// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple %itanium_abi_triple -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s +// expected-no-diagnostics +#ifndef HEADER +#define HEADER + +typedef __INTPTR_TYPE__ intptr_t; + +// CHECK-DAG: [[IDENT_T_TY:%.+]] = type { i32, i32, i32, i32, i8* } +// CHECK-DAG: [[S_TY:%.+]] = type { [[INTPTR_T_TY:i[0-9]+]], [[INTPTR_T_TY]], [[INTPTR_T_TY]] } +// CHECK-DAG: [[STR:@.+]] = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00" +// CHECK-DAG: [[DEF_LOC_2:@.+]] = private unnamed_addr constant [[IDENT_T_TY]] { i32 0, i32 2, i32 0, i32 0, i8* getelementptr inbounds ([23 x i8], [23 x i8]* [[STR]], i32 0, i32 0) } + +void foo(); + +struct S { + intptr_t a, b, c; + S(intptr_t a) : a(a) {} + operator char() { return a; } + ~S() {} +}; + 
+template <typename T, int C>
+int tmain() {
+#pragma omp target
+#pragma omp teams
+#pragma omp distribute parallel for num_threads(C)
+  for (int i = 0; i < 100; i++)
+    foo();
+#pragma omp target
+#pragma omp teams
+#pragma omp distribute parallel for num_threads(T(23))
+  for (int i = 0; i < 100; i++)
+    foo();
+  return 0;
+}
+
+int main() {
+  S s(0);
+  char a = s;
+// CHECK: call i{{[0-9]+}} @__tgt_target_teams(
+// CHECK: call void [[OFFLOADING_FUN_0:@.+]](
+// CHECK: call i{{[0-9]+}} @__tgt_target_teams(
+// CHECK: call void [[OFFLOADING_FUN_1:@.+]](
+// CHECK: invoke{{.+}} [[TMAIN_5:@.+]]()
+// CHECK: invoke{{.+}} [[TMAIN_1:@.+]]()
+#pragma omp target
+#pragma omp teams
+  // CHECK: define internal void [[OFFLOADING_FUN_0]](
+  // CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 0, {{.+}}* [[OMP_TEAMS_OUTLINED_0:@.+]] to {{.+}})
+#pragma omp distribute parallel for num_threads(2)
+  for (int i = 0; i < 100; i++) {
+    // CHECK: define{{.+}} void [[OMP_TEAMS_OUTLINED_0]](
+    // CHECK: call {{.*}}void @__kmpc_push_num_threads([[IDENT_T_TY]]* [[DEF_LOC_2]], i32 {{.+}}, i32 2)
+    // CHECK: call {{.*}}void {{.*}} @__kmpc_fork_call(
+    foo();
+  }
+#pragma omp target
+#pragma omp teams
+  // CHECK: define internal void [[OFFLOADING_FUN_1]](
+
+  // CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 1, {{.+}}* [[OMP_TEAMS_OUTLINED_1:@.+]] to {{.+}})
+#pragma omp distribute parallel for num_threads(a)
+  for (int i = 0; i < 100; i++) {
+    // CHECK: define{{.+}} void [[OMP_TEAMS_OUTLINED_1]](
+    // CHECK-DAG: [[A_ADDR:%.+]] = alloca i8*,
+    // CHECK-DAG: [[A_REF:%.+]] = load i8*, i8** [[A_ADDR]],
+    // CHECK-DAG: [[A_VAL:%.+]] = load i8, i8* [[A_REF]],
+    // CHECK-DAG: [[A_EXT:%.+]] = sext i8 [[A_VAL]] to {{.+}}
+    // CHECK: call {{.*}}void @__kmpc_push_num_threads([[IDENT_T_TY]]* [[DEF_LOC_2]], i32 {{.+}}, i32 [[A_EXT]])
+    // CHECK: call {{.*}}void {{.*}} @__kmpc_fork_call(
+    foo();
+  }
+  return a + tmain<char, 5>() + tmain<S, 1>();
+}
+
+// tmain 5
+// CHECK-DAG: define {{.*}}i{{[0-9]+}} 
[[TMAIN_5]]() +// CHECK: call i{{[0-9]+}} @__tgt_target_teams( +// CHECK: call void [[T_OFFLOADING_FUN_0:@.+]]( +// CHECK: call i{{[0-9]+}} @__tgt_target_teams( +// CHECK: call void [[T_OFFLOADING_FUN_1:@.+]]( + +// tmain 1 +// CHECK-DAG: define {{.*}}i{{[0-9]+}} [[TMAIN_1]]() +// CHECK: call i{{[0-9]+}} @__tgt_target_teams( +// CHECK: call void [[T_OFFLOADING_FUN_2:@.+]]( +// CHECK: call i{{[0-9]+}} @__tgt_target_teams( +// CHECK: call void [[T_OFFLOADING_FUN_3:@.+]]( + +// CHECK: define internal void [[T_OFFLOADING_FUN_0]]( +// CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 0, {{.+}}* [[T_OMP_TEAMS_OUTLINED_0:@.+]] to {{.+}}) + +// CHECK: define{{.+}} void [[T_OMP_TEAMS_OUTLINED_0]]( +// CHECK: call {{.*}}void @__kmpc_push_num_threads([[IDENT_T_TY]]* [[DEF_LOC_2]], i32 {{.+}}, i32 5) +// CHECK: call {{.*}}void {{.*}} @__kmpc_fork_call( + +// CHECK: define internal void [[T_OFFLOADING_FUN_1]]( +// CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 0, {{.+}}* [[T_OMP_TEAMS_OUTLINED_1:@.+]] to {{.+}}) + +// CHECK: define{{.+}} void [[T_OMP_TEAMS_OUTLINED_1]]( +// CHECK: call {{.*}}void @__kmpc_push_num_threads([[IDENT_T_TY]]* [[DEF_LOC_2]], i32 {{.+}}, i32 23) +// CHECK: call {{.*}}void {{.*}} @__kmpc_fork_call( + +// CHECK: define internal void [[T_OFFLOADING_FUN_2]]( +// CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 0, {{.+}}* [[T_OMP_TEAMS_OUTLINED_2:@.+]] to {{.+}}) + +// CHECK: define{{.+}} void [[T_OMP_TEAMS_OUTLINED_2]]( +// CHECK: call {{.*}}void @__kmpc_push_num_threads([[IDENT_T_TY]]* [[DEF_LOC_2]], i32 {{.+}}, i32 1) +// CHECK: call {{.*}}void {{.*}} @__kmpc_fork_call( + +// CHECK: define internal void [[T_OFFLOADING_FUN_3]]( +// CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 0, {{.+}}* [[T_OMP_TEAMS_OUTLINED_3:@.+]] to {{.+}}) + +// CHECK: define{{.+}} void [[T_OMP_TEAMS_OUTLINED_3]]( +// CHECK-DAG: [[CALL_RES:%.+]] = invoke{{.*}} i8 [[S_TY_CHAR_OP:@.+]]([[S_TY]]* {{.+}}) +// CHECK-DAG: 
[[CALL_RES_SEXT:%.+]] = sext i8 [[CALL_RES]] to {{.+}} +// CHECK: call {{.*}}void @__kmpc_push_num_threads([[IDENT_T_TY]]* [[DEF_LOC_2]], i32 {{.+}}, i32 [[CALL_RES_SEXT]]) +// CHECK: call {{.*}}void {{.*}} @__kmpc_fork_call( +#endif diff --git a/test/OpenMP/distribute_parallel_for_private_codegen.cpp b/test/OpenMP/distribute_parallel_for_private_codegen.cpp new file mode 100644 index 0000000..d8d8a9a --- /dev/null +++ b/test/OpenMP/distribute_parallel_for_private_codegen.cpp @@ -0,0 +1,297 @@ +// RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-64 +// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s +// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-64 +// RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-32 +// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s +// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-32 + +// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64 +// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown 
-fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
+// RUN: %clang_cc1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
+// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
+// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
+// expected-no-diagnostics
+#ifndef HEADER
+#define HEADER
+
+template <typename T>
+struct S {
+  T f;
+  S(T a) : f(a) {}
+  S() : f() {}
+  operator T() { return T(); }
+  ~S() {}
+};
+
+// CHECK: [[S_FLOAT_TY:%.+]] = type { float }
+// CHECK: [[S_INT_TY:%.+]] = type { i{{[0-9]+}} }
+template <typename T>
+T tmain() {
+  S<T> test;
+  T t_var = T();
+  T vec[] = {1, 2};
+  S<T> s_arr[] = {1, 2};
+  S<T> &var = test;
+  #pragma omp target
+  #pragma omp teams
+  #pragma omp distribute parallel for private(t_var, vec, s_arr, s_arr, var, var)
+  for (int i = 0; i < 2; ++i) {
+    vec[i] = t_var;
+    s_arr[i] = var;
+  }
+  return T();
+}
+
+int main() {
+  static int svar;
+  volatile double g;
+  volatile double &g1 = g;
+
+  #ifdef LAMBDA
+  // LAMBDA-LABEL: @main
+  // LAMBDA: call{{.*}} void [[OUTER_LAMBDA:@.+]](
+  [&]() {
+    static float sfvar;
+    // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
+    // LAMBDA: call i{{[0-9]+}} @__tgt_target_teams(
+    // LAMBDA: call void [[OFFLOADING_FUN:@.+]](
+
+    // LAMBDA: define{{.+}} void [[OFFLOADING_FUN]]()
+    // LAMBDA: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 0, {{.+}}* [[OMP_OUTLINED:@.+]] to {{.+}})
+    #pragma omp target
+    
#pragma omp teams + #pragma omp distribute parallel for private(g, g1, svar, sfvar) + for (int i = 0; i < 2; ++i) { + // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_OUTLINED]](i32* noalias %{{.+}}, i32* noalias %{{.+}}) + // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca double, + // LAMBDA: [[G1_PRIVATE_ADDR:%.+]] = alloca double, + // LAMBDA: [[TMP_PRIVATE_ADDR:%.+]] = alloca double*, + // LAMBDA: [[SVAR_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}, + // LAMBDA: [[SFVAR_PRIVATE_ADDR:%.+]] = alloca float, + // LAMBDA: store double* [[G1_PRIVATE_ADDR]], double** [[TMP_PRIVATE_ADDR]], + // LAMBDA: call {{.*}}void @__kmpc_for_static_init_4( + // LAMBDA: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED:@.+]] to {{.+}}, + // LAMBDA: call {{.*}}void @__kmpc_for_static_fini( + // LAMBDA: ret void + + // LAMBDA: define{{.+}} void [[OMP_PARFOR_OUTLINED]]( + // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca double, + // LAMBDA: [[G1_PRIVATE_ADDR:%.+]] = alloca double, + // LAMBDA: [[TMP_PRIVATE_ADDR:%.+]] = alloca double*, + // LAMBDA: [[SVAR_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}, + // LAMBDA: [[SFVAR_PRIVATE_ADDR:%.+]] = alloca float, + + g = 1; + g1 = 1; + svar = 3; + sfvar = 4.0; + // LAMBDA: store double* [[G1_PRIVATE_ADDR]], double** [[TMP_PRIVATE_ADDR]], + // LAMBDA: store double 1.0{{.+}}, double* [[G_PRIVATE_ADDR]], + // LAMBDA: store i{{[0-9]+}} 3, i{{[0-9]+}}* [[SVAR_PRIVATE_ADDR]], + // LAMBDA: store float 4.0{{.+}}, float* [[SFVAR_PRIVATE_ADDR]], + // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 + // LAMBDA: store double* [[G_PRIVATE_ADDR]], double** [[G_PRIVATE_ADDR_REF]], + // LAMBDA: [[TMP_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 + // LAMBDA: [[G1_PRIVATE_ADDR_FROM_TMP:%.+]] = load double*, double** [[TMP_PRIVATE_ADDR]], + // LAMBDA: store double* [[G1_PRIVATE_ADDR_FROM_TMP]], double** 
[[TMP_PRIVATE_ADDR_REF]], + // LAMBDA: [[SVAR_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 2 + // LAMBDA: store i{{[0-9]+}}* [[SVAR_PRIVATE_ADDR]], i{{[0-9]+}}** [[SVAR_PRIVATE_ADDR_REF]] + // LAMBDA: [[SFVAR_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 3 + // LAMBDA: store float* [[SFVAR_PRIVATE_ADDR]], float** [[SFVAR_PRIVATE_ADDR_REF]] + // LAMBDA: call{{.*}} void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]]) + // LAMBDA: call {{.*}}void @__kmpc_for_static_fini( + // LAMBDA: ret void + [&]() { + // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]]) + // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]], + g = 2; + g1 = 2; + svar = 4; + sfvar = 8.0; + // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]] + // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 + // LAMBDA: [[G_REF:%.+]] = load double*, double** [[G_PTR_REF]] + // LAMBDA: store double 2.0{{.+}}, double* [[G_REF]] + + // LAMBDA: [[TMP_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 + // LAMBDA: [[G1_REF:%.+]] = load double*, double** [[TMP_PTR_REF]] + // LAMBDA: store double 2.0{{.+}}, double* [[G1_REF]], + // LAMBDA: [[SVAR_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 2 + // LAMBDA: [[SVAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SVAR_PTR_REF]] + // LAMBDA: store i{{[0-9]+}} 4, i{{[0-9]+}}* [[SVAR_REF]] + // LAMBDA: [[SFVAR_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 3 + // LAMBDA: [[SFVAR_REF:%.+]] = load float*, float** [[SFVAR_PTR_REF]] + // LAMBDA: store float 8.0{{.+}}, float* [[SFVAR_REF]] + }(); + } + }(); + return 0; + #else + S test; + int t_var = 0; + int vec[] = {1, 2}; + S s_arr[] = {1, 2}; + S &var = 
test;
+
+  #pragma omp target
+  #pragma omp teams
+  #pragma omp distribute parallel for private(t_var, vec, s_arr, s_arr, var, var, svar)
+  for (int i = 0; i < 2; ++i) {
+    vec[i] = t_var;
+    s_arr[i] = var;
+  }
+  return tmain<int>();
+  #endif
+}
+
+// CHECK: define{{.*}} i{{[0-9]+}} @main()
+// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
+// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
+// CHECK: call i{{[0-9]+}} @__tgt_target_teams(
+// CHECK: call void [[OFFLOAD_FUN_0:@.+]](
+
+// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_DESTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
+// CHECK: ret
+
+// CHECK: define{{.+}} [[OFFLOAD_FUN_0]]()
+// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_teams(%{{.+}}* @{{.+}}, i{{[0-9]+}} 0, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*)* [[OMP_OUTLINED_0:@.+]] to void
+// CHECK: ret
+//
+// CHECK: define internal void [[OMP_OUTLINED_0]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}})
+// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
+// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}],
+// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_FLOAT_TY]]],
+// CHECK-NOT: alloca [2 x [[S_FLOAT_TY]]],
+// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_FLOAT_TY]],
+// CHECK-NOT: alloca [[S_FLOAT_TY]],
+// CHECK: [[S_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
+// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]
+// CHECK-NOT: [[T_VAR_PRIV]]
+// CHECK-NOT: [[VEC_PRIV]]
+// this is the ctor loop
+// CHECK: {{.+}}:
+// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_FLOAT_TY]]*
+// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* [[S_ARR_PRIV_ITEM]])
+// CHECK-NOT: [[T_VAR_PRIV]]
+// CHECK-NOT: [[VEC_PRIV]]
+// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
+// CHECK: call void @__kmpc_for_static_init_4(
+// CHECK: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, 
{{.+}}[[OMP_PARFOR_OUTLINED_0:@.+]] to {{.+}}, +// CHECK: call void @__kmpc_for_static_fini( + +// call destructors: var.. +// CHECK-DAG: call {{.+}} [[S_FLOAT_TY_DEF_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]]) + +// ..and s_arr +// CHECK: {{.+}}: +// CHECK: [[S_ARR_EL_PAST:%.+]] = phi [[S_FLOAT_TY]]* +// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = getelementptr {{.+}}, {{.+}} [[S_ARR_EL_PAST]], +// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_DESTR]]([[S_FLOAT_TY]]* [[S_ARR_PRIV_ITEM]]) + +// CHECK: ret void + +// By OpenMP specifications, private applies to both distribute and parallel for. +// However, the support for 'private' of 'parallel' is only used when 'parallel' +// is found alone. Therefore we only have one 'private' support for 'parallel for' +// in combination +// CHECK: define{{.+}} void [[OMP_PARFOR_OUTLINED_0]]( +// CHECK: [[T_VAR_PRIV:%t_var+]] = alloca i{{[0-9]+}}, +// CHECK: [[VEC_PRIV:%vec+]] = alloca [2 x i{{[0-9]+}}], +// CHECK: [[S_ARR_PRIV:%s_arr+]] = alloca [2 x [[S_FLOAT_TY]]], +// CHECK-NOT: alloca [2 x [[S_FLOAT_TY]]], +// CHECK: [[VAR_PRIV:%var+]] = alloca [[S_FLOAT_TY]], +// CHECK-NOT: alloca [[S_FLOAT_TY]], +// CHECK: [[S_VAR_PRIV:%svar+]] = alloca i{{[0-9]+}}, +// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]] +// CHECK-NOT: [[T_VAR_PRIV]] +// CHECK-NOT: [[VEC_PRIV]] +// this is the ctor loop +// CHECK: {{.+}}: +// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_FLOAT_TY]]* +// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* [[S_ARR_PRIV_ITEM]]) +// CHECK-NOT: [[T_VAR_PRIV]] +// CHECK-NOT: [[VEC_PRIV]] +// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]]) +// CHECK: call void @__kmpc_for_static_init_4( +// CHECK: call void @__kmpc_for_static_fini( + +// call destructors: var.. 
+// CHECK-DAG: call {{.+}} [[S_FLOAT_TY_DEF_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]]) + +// ..and s_arr +// CHECK: {{.+}}: +// CHECK: [[S_ARR_EL_PAST:%.+]] = phi [[S_FLOAT_TY]]* +// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = getelementptr {{.+}}, {{.+}} [[S_ARR_EL_PAST]], +// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_DESTR]]([[S_FLOAT_TY]]* [[S_ARR_PRIV_ITEM]]) + +// CHECK: ret void + +// template tmain with S_INT_TY +// CHECK: define{{.*}} i{{[0-9]+}} [[TMAIN_INT:@.+]]() +// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]], +// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]]) +// CHECK: call i{{[0-9]+}} @__tgt_target_teams( +// CHECK: call void [[OFFLOAD_FUN_1:@.+]]( +// CHECK: call {{.*}} [[S_INT_TY_DEF_DESTR:@.+]]([[S_INT_TY]]* [[TEST]]) +// CHECK: ret + +// CHECK: ret + +// CHECK: define internal void [[OFFLOAD_FUN_1]]() +// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_teams(%{{.+}}* @{{.+}}, i{{[0-9]+}} 0, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*)* [[OMP_OUTLINED_1:@.+]] to void +// CHECK: ret +// +// CHECK: define internal void [[OMP_OUTLINED_1]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}) +// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}}, +// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}], +// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_INT_TY]]], +// CHECK-NOT: alloca [2 x [[S_INT_TY]]], +// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]], +// CHECK-NOT: alloca [[S_INT_TY]], +// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]] +// CHECK-NOT: [[T_VAR_PRIV]] +// CHECK-NOT: [[VEC_PRIV]] +// CHECK: {{.+}}: +// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_INT_TY]]* +// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[S_ARR_PRIV_ITEM]]) +// CHECK-NOT: [[T_VAR_PRIV]] +// CHECK-NOT: [[VEC_PRIV]] +// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[VAR_PRIV]]) +// CHECK: call void 
@__kmpc_for_static_init_4( +// CHECK: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_1:@.+]] to {{.+}}, +// CHECK: call void @__kmpc_for_static_fini( +// CHECK: ret void + +// CHECK: define{{.+}} void [[OMP_PARFOR_OUTLINED_1]]( +// CHECK: [[T_VAR_PRIV:%t_var+]] = alloca i{{[0-9]+}}, +// CHECK: [[VEC_PRIV:%vec+]] = alloca [2 x i{{[0-9]+}}], +// CHECK: [[S_ARR_PRIV:%s_arr+]] = alloca [2 x [[S_INT_TY]]], +// CHECK-NOT: alloca [2 x [[S_INT_TY]]], +// CHECK: [[VAR_PRIV:%var+]] = alloca [[S_INT_TY]], +// CHECK-NOT: alloca [[S_INT_TY]], +// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]] +// CHECK-NOT: [[T_VAR_PRIV]] +// CHECK-NOT: [[VEC_PRIV]] +// this is the ctor loop +// CHECK: {{.+}}: +// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_INT_TY]]* +// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[S_ARR_PRIV_ITEM]]) +// CHECK-NOT: [[T_VAR_PRIV]] +// CHECK-NOT: [[VEC_PRIV]] +// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[VAR_PRIV]]) +// CHECK: call void @__kmpc_for_static_init_4( +// CHECK: call void @__kmpc_for_static_fini( + +// call destructors: var.. 
+// CHECK-DAG: call {{.+}} [[S_INT_TY_DEF_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]])
+
+// ..and s_arr
+// CHECK: {{.+}}:
+// CHECK: [[S_ARR_EL_PAST:%.+]] = phi [[S_INT_TY]]*
+// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = getelementptr {{.+}}, {{.+}} [[S_ARR_EL_PAST]],
+// CHECK: call {{.*}} [[S_INT_TY_DEF_DESTR]]([[S_INT_TY]]* [[S_ARR_PRIV_ITEM]])
+
+// CHECK: ret void
+
+#endif
diff --git a/test/OpenMP/distribute_parallel_for_proc_bind_codegen.cpp b/test/OpenMP/distribute_parallel_for_proc_bind_codegen.cpp
new file mode 100644
index 0000000..958b4e7
--- /dev/null
+++ b/test/OpenMP/distribute_parallel_for_proc_bind_codegen.cpp
@@ -0,0 +1,93 @@
+// add -fopenmp-targets
+
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
+// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
+// expected-no-diagnostics
+#ifndef HEADER
+#define HEADER
+
+typedef __INTPTR_TYPE__ intptr_t;
+
+// CHECK-DAG: [[IDENT_T_TY:%.+]] = type { i32, i32, i32, i32, i8* }
+// CHECK-DAG: [[STR:@.+]] = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00"
+// CHECK-DAG: [[DEF_LOC_2:@.+]] = private unnamed_addr constant [[IDENT_T_TY]] { i32 0, i32 2, i32 0, i32 0, i8* getelementptr inbounds ([23 x i8], [23 x i8]* [[STR]], i32 0, i32 0) }
+
+void foo();
+
+struct S {
+  intptr_t a, b, c;
+  S(intptr_t a) : a(a) {}
+  operator char() { return a; }
+  ~S() {}
+};
+
+template <typename T>
+T tmain() {
+#pragma omp target
+#pragma omp teams
+#pragma omp distribute parallel for proc_bind(master)
+  for(int i = 0; i < 1000; i++) {}
+  return T();
+}
+
+int main() {
+  // CHECK-LABEL: @main
+#pragma omp target
+#pragma omp 
teams
+#pragma omp distribute parallel for proc_bind(spread)
+  for(int i = 0; i < 1000; i++) {}
+#pragma omp target
+#pragma omp teams
+#pragma omp distribute parallel for proc_bind(close)
+  for(int i = 0; i < 1000; i++) {}
+  return tmain<int>();
+}
+
+// CHECK: call {{.*}}@__tgt_target_teams({{.+}})
+// CHECK: call void [[OFFL1:@.+]]()
+// CHECK: call {{.*}}@__tgt_target_teams({{.+}})
+// CHECK: call void [[OFFL2:@.+]]()
+// CHECK: [[CALL_RET:%.+]] = call{{.+}} i32 [[TMAIN:@.+]]()
+// CHECK: ret i32 [[CALL_RET]]

+// CHECK: define{{.+}} void [[OFFL1]](
+// CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, {{.+}}, {{.+}}* [[OMP_OUTLINED_1:@.+]] to {{.+}})

+// CHECK: define{{.+}} [[OMP_OUTLINED_1]](i32* {{.+}} [[GTID_IN:%.+]],
+// CHECK: [[GTID_ADDR:%.+]] = alloca i32*,
+// CHECK: store i32* [[GTID_IN]], i32** [[GTID_ADDR]],
+// CHECK: [[GTID_REF:%.+]] = load i32*, i32** [[GTID_ADDR]],
+// CHECK: [[GTID_VAL:%.+]] = load i32, i32* [[GTID_REF]],
+// CHECK: call {{.*}}void @__kmpc_push_proc_bind([[IDENT_T_TY]]* [[DEF_LOC_2]], i32 [[GTID_VAL]], i32 4)
+// CHECK: call {{.*}}void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
+// CHECK: ret void

+// CHECK: define{{.+}} [[OFFL2]]()
+// CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, {{.+}}, {{.+}}* [[OMP_OUTLINED_1:@.+]] to {{.+}})

+// CHECK: define{{.+}} [[OMP_OUTLINED_1]](i32* {{.+}} [[GTID_IN:%.+]],
+// CHECK: [[GTID_ADDR:%.+]] = alloca i32*,
+// CHECK: store i32* [[GTID_IN]], i32** [[GTID_ADDR]],
+// CHECK: [[GTID_REF:%.+]] = load i32*, i32** [[GTID_ADDR]],
+// CHECK: [[GTID_VAL:%.+]] = load i32, i32* [[GTID_REF]],
+// CHECK: call {{.*}}void @__kmpc_push_proc_bind([[IDENT_T_TY]]* [[DEF_LOC_2]], i32 [[GTID_VAL]], i32 3)
+// CHECK: call {{.*}}void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call( +// CHECK: ret void + +// CHECK: define{{.+}} [[TMAIN]]() +// CHECK: call {{.*}}@__tgt_target_teams({{.+}}) +// CHECK: call void [[OFFL3:@.+]]() + +// CHECK: define{{.+}} [[OFFL3]]() +// CHECK: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, {{.+}}, {{.+}}* [[OMP_OUTLINED_3:@.+]] to {{.+}}) + +// CHECK: define{{.+}} [[OMP_OUTLINED_3]](i32* {{.+}} [[GTID_IN:%.+]], +// CHECK: [[GTID_ADDR:%.+]] = alloca i32*, +// CHECK: store i32* [[GTID_IN]], i32** [[GTID_ADDR]], +// CHECK: [[GTID_REF:%.+]] = load i32*, i32** [[GTID_ADDR]], +// CHECK: [[GTID_VAL:%.+]] = load i32, i32* [[GTID_REF]], +// CHECK: call {{.*}}void @__kmpc_push_proc_bind([[IDENT_T_TY]]* [[DEF_LOC_2]], i32 [[GTID_VAL]], i32 2) +// CHECK: call {{.*}}void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call( +// CHECK: ret void +#endif diff --git a/test/Parser/cxx-modules-import.cpp b/test/Parser/cxx-modules-import.cpp deleted file mode 100644 index 0600edd..0000000 --- a/test/Parser/cxx-modules-import.cpp +++ /dev/null @@ -1,58 +0,0 @@ -// RUN: rm -rf %t -// RUN: mkdir -p %t -// RUN: echo 'module x; int a, b;' > %t/x.cppm -// RUN: echo 'module x.y; int c;' > %t/x.y.cppm -// -// RUN: %clang_cc1 -std=c++1z -fmodules-ts -emit-module-interface %t/x.cppm -o %t/x.pcm -// RUN: %clang_cc1 -std=c++1z -fmodules-ts -emit-module-interface -fmodule-file=%t/x.pcm %t/x.y.cppm -o %t/x.y.pcm -// -// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \ -// RUN: -DTEST=1 -DMODULE_KIND=implementation -DMODULE_NAME=z -// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \ -// RUN: -DTEST=2 -DMODULE_KIND=implementation -DMODULE_NAME=x -// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \ -// RUN: -DTEST=3 -DMODULE_KIND= -DMODULE_NAME=z -// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \ -// RUN: -DTEST=4 -DMODULE_KIND=partition -DMODULE_NAME=z -// RUN: %clang_cc1 
-std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \ -// RUN: -DTEST=5 -DMODULE_KIND=elderberry -DMODULE_NAME=z -// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \ -// RUN: -DTEST=1 -DMODULE_KIND=implementation -DMODULE_NAME='z [[]]' -// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \ -// RUN: -DTEST=6 -DMODULE_KIND=implementation -DMODULE_NAME='z [[fancy]]' -// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \ -// RUN: -DTEST=7 -DMODULE_KIND=implementation -DMODULE_NAME='z [[maybe_unused]]' - -module MODULE_KIND MODULE_NAME; -#if TEST == 3 -// expected-error@-2 {{'module' declaration found while not building module interface}} -#elif TEST == 4 -// expected-error@-4 {{'module partition' declaration found while not building module interface}} -#elif TEST == 5 -// expected-error@-6 {{unexpected module kind 'elderberry'}} -#elif TEST == 6 -// expected-warning@-8 {{unknown attribute 'fancy' ignored}} -#elif TEST == 7 -// expected-error-re@-10 {{'maybe_unused' attribute cannot be applied to a module{{$}}}} -#endif - -int use_1 = a; -#if TEST != 2 -// expected-error@-2 {{declaration of 'a' must be imported from module 'x' before it is required}} -// expected-note@x.cppm:1 {{here}} -#endif - -import x; - -int use_2 = b; // ok - -import x [[]]; -import x [[foo]]; // expected-warning {{unknown attribute 'foo' ignored}} -import x [[noreturn]]; // expected-error {{'noreturn' attribute cannot be applied to a module import}} -import x [[blarg::noreturn]]; // expected-warning {{unknown attribute 'noreturn' ignored}} - -import x.y; -import x.; // expected-error {{expected a module name after module import}} -import .x; // expected-error {{expected a module name after module import}} - -import blarg; // expected-error {{module 'blarg' not found}} diff --git a/test/Parser/cxx-modules-interface.cppm b/test/Parser/cxx-modules-interface.cppm index f7835bd..0e7c746 
100644 --- a/test/Parser/cxx-modules-interface.cppm +++ b/test/Parser/cxx-modules-interface.cppm @@ -1,13 +1,12 @@ // RUN: %clang_cc1 -std=c++1z -fmodules-ts -emit-module-interface %s -o %t.pcm -verify -// RUN: %clang_cc1 -std=c++1z -fmodules-ts -emit-module-interface %s -o %t.pcm -verify -Dmodule=int -DERRORS +// RUN: %clang_cc1 -std=c++1z -fmodules-ts -emit-module-interface %s -o %t.pcm -verify -DERRORS -module foo; +export module foo; #ifndef ERRORS // expected-no-diagnostics #else -// expected-error@-4 {{expected module declaration at start of module interface}} +// FIXME: diagnose missing module-declaration when building module interface -// FIXME: support 'export module X;' and 'export { int n; module X; }' // FIXME: proclaimed-ownership-declarations? export { diff --git a/test/Preprocessor/init.c b/test/Preprocessor/init.c index 7548fba..5fb2011 100644 --- a/test/Preprocessor/init.c +++ b/test/Preprocessor/init.c @@ -8846,6 +8846,16 @@ // WEBASSEMBLY32-NEXT:#define __CHAR32_TYPE__ unsigned int // WEBASSEMBLY32-NEXT:#define __CHAR_BIT__ 8 // WEBASSEMBLY32-NOT:#define __CHAR_UNSIGNED__ +// WEBASSEMBLY32-NEXT:#define __CLANG_ATOMIC_BOOL_LOCK_FREE 2 +// WEBASSEMBLY32-NEXT:#define __CLANG_ATOMIC_CHAR16_T_LOCK_FREE 2 +// WEBASSEMBLY32-NEXT:#define __CLANG_ATOMIC_CHAR32_T_LOCK_FREE 2 +// WEBASSEMBLY32-NEXT:#define __CLANG_ATOMIC_CHAR_LOCK_FREE 2 +// WEBASSEMBLY32-NEXT:#define __CLANG_ATOMIC_INT_LOCK_FREE 2 +// WEBASSEMBLY32-NEXT:#define __CLANG_ATOMIC_LLONG_LOCK_FREE 1 +// WEBASSEMBLY32-NEXT:#define __CLANG_ATOMIC_LONG_LOCK_FREE 2 +// WEBASSEMBLY32-NEXT:#define __CLANG_ATOMIC_POINTER_LOCK_FREE 2 +// WEBASSEMBLY32-NEXT:#define __CLANG_ATOMIC_SHORT_LOCK_FREE 2 +// WEBASSEMBLY32-NEXT:#define __CLANG_ATOMIC_WCHAR_T_LOCK_FREE 2 // WEBASSEMBLY32-NEXT:#define __CONSTANT_CFSTRINGS__ 1 // WEBASSEMBLY32-NEXT:#define __DBL_DECIMAL_DIG__ 17 // WEBASSEMBLY32-NEXT:#define __DBL_DENORM_MIN__ 4.9406564584124654e-324 @@ -9162,6 +9172,16 @@ // WEBASSEMBLY64-NEXT:#define 
__CHAR32_TYPE__ unsigned int // WEBASSEMBLY64-NEXT:#define __CHAR_BIT__ 8 // WEBASSEMBLY64-NOT:#define __CHAR_UNSIGNED__ +// WEBASSEMBLY64-NEXT:#define __CLANG_ATOMIC_BOOL_LOCK_FREE 2 +// WEBASSEMBLY64-NEXT:#define __CLANG_ATOMIC_CHAR16_T_LOCK_FREE 2 +// WEBASSEMBLY64-NEXT:#define __CLANG_ATOMIC_CHAR32_T_LOCK_FREE 2 +// WEBASSEMBLY64-NEXT:#define __CLANG_ATOMIC_CHAR_LOCK_FREE 2 +// WEBASSEMBLY64-NEXT:#define __CLANG_ATOMIC_INT_LOCK_FREE 2 +// WEBASSEMBLY64-NEXT:#define __CLANG_ATOMIC_LLONG_LOCK_FREE 2 +// WEBASSEMBLY64-NEXT:#define __CLANG_ATOMIC_LONG_LOCK_FREE 2 +// WEBASSEMBLY64-NEXT:#define __CLANG_ATOMIC_POINTER_LOCK_FREE 2 +// WEBASSEMBLY64-NEXT:#define __CLANG_ATOMIC_SHORT_LOCK_FREE 2 +// WEBASSEMBLY64-NEXT:#define __CLANG_ATOMIC_WCHAR_T_LOCK_FREE 2 // WEBASSEMBLY64-NEXT:#define __CONSTANT_CFSTRINGS__ 1 // WEBASSEMBLY64-NEXT:#define __DBL_DECIMAL_DIG__ 17 // WEBASSEMBLY64-NEXT:#define __DBL_DENORM_MIN__ 4.9406564584124654e-324 @@ -9637,3 +9657,36 @@ // AVR:#define __WCHAR_MAX__ 32767 // AVR:#define __WCHAR_TYPE__ int // AVR:#define __WINT_TYPE__ int + + +// RUN: %clang_cc1 -E -dM -ffreestanding \ +// RUN: -triple i686-windows-msvc -fms-compatibility < /dev/null \ +// RUN: | FileCheck -match-full-lines -check-prefix MSVC-X32 %s + +// RUN: %clang_cc1 -E -dM -ffreestanding \ +// RUN: -triple x86_64-windows-msvc -fms-compatibility < /dev/null \ +// RUN: | FileCheck -match-full-lines -check-prefix MSVC-X64 %s + +// MSVC-X32:#define __CLANG_ATOMIC_BOOL_LOCK_FREE 2 +// MSVC-X32-NEXT:#define __CLANG_ATOMIC_CHAR16_T_LOCK_FREE 2 +// MSVC-X32-NEXT:#define __CLANG_ATOMIC_CHAR32_T_LOCK_FREE 2 +// MSVC-X32-NEXT:#define __CLANG_ATOMIC_CHAR_LOCK_FREE 2 +// MSVC-X32-NEXT:#define __CLANG_ATOMIC_INT_LOCK_FREE 2 +// MSVC-X32-NEXT:#define __CLANG_ATOMIC_LLONG_LOCK_FREE 2 +// MSVC-X32-NEXT:#define __CLANG_ATOMIC_LONG_LOCK_FREE 2 +// MSVC-X32-NEXT:#define __CLANG_ATOMIC_POINTER_LOCK_FREE 2 +// MSVC-X32-NEXT:#define __CLANG_ATOMIC_SHORT_LOCK_FREE 2 +// MSVC-X32-NEXT:#define 
__CLANG_ATOMIC_WCHAR_T_LOCK_FREE 2 +// MSVC-X32-NOT:#define __GCC_ATOMIC{{.*}} + +// MSVC-X64:#define __CLANG_ATOMIC_BOOL_LOCK_FREE 2 +// MSVC-X64-NEXT:#define __CLANG_ATOMIC_CHAR16_T_LOCK_FREE 2 +// MSVC-X64-NEXT:#define __CLANG_ATOMIC_CHAR32_T_LOCK_FREE 2 +// MSVC-X64-NEXT:#define __CLANG_ATOMIC_CHAR_LOCK_FREE 2 +// MSVC-X64-NEXT:#define __CLANG_ATOMIC_INT_LOCK_FREE 2 +// MSVC-X64-NEXT:#define __CLANG_ATOMIC_LLONG_LOCK_FREE 2 +// MSVC-X64-NEXT:#define __CLANG_ATOMIC_LONG_LOCK_FREE 2 +// MSVC-X64-NEXT:#define __CLANG_ATOMIC_POINTER_LOCK_FREE 2 +// MSVC-X64-NEXT:#define __CLANG_ATOMIC_SHORT_LOCK_FREE 2 +// MSVC-X64-NEXT:#define __CLANG_ATOMIC_WCHAR_T_LOCK_FREE 2 +// MSVC-X86-NOT:#define __GCC_ATOMIC{{.*}} diff --git a/test/Sema/atomic-ops.c b/test/Sema/atomic-ops.c index 8ebf3ea..eee1cda 100644 --- a/test/Sema/atomic-ops.c +++ b/test/Sema/atomic-ops.c @@ -7,19 +7,29 @@ struct S { char c[3]; }; _Static_assert(__GCC_ATOMIC_BOOL_LOCK_FREE == 2, ""); +_Static_assert(__GCC_ATOMIC_BOOL_LOCK_FREE == __CLANG_ATOMIC_BOOL_LOCK_FREE, ""); _Static_assert(__GCC_ATOMIC_CHAR_LOCK_FREE == 2, ""); +_Static_assert(__GCC_ATOMIC_CHAR_LOCK_FREE == __CLANG_ATOMIC_CHAR_LOCK_FREE, ""); _Static_assert(__GCC_ATOMIC_CHAR16_T_LOCK_FREE == 2, ""); +_Static_assert(__GCC_ATOMIC_CHAR16_T_LOCK_FREE == __CLANG_ATOMIC_CHAR16_T_LOCK_FREE, ""); _Static_assert(__GCC_ATOMIC_CHAR32_T_LOCK_FREE == 2, ""); +_Static_assert(__GCC_ATOMIC_CHAR32_T_LOCK_FREE == __CLANG_ATOMIC_CHAR32_T_LOCK_FREE, ""); _Static_assert(__GCC_ATOMIC_WCHAR_T_LOCK_FREE == 2, ""); +_Static_assert(__GCC_ATOMIC_WCHAR_T_LOCK_FREE == __CLANG_ATOMIC_WCHAR_T_LOCK_FREE, ""); _Static_assert(__GCC_ATOMIC_SHORT_LOCK_FREE == 2, ""); +_Static_assert(__GCC_ATOMIC_SHORT_LOCK_FREE == __CLANG_ATOMIC_SHORT_LOCK_FREE, ""); _Static_assert(__GCC_ATOMIC_INT_LOCK_FREE == 2, ""); +_Static_assert(__GCC_ATOMIC_INT_LOCK_FREE == __CLANG_ATOMIC_INT_LOCK_FREE, ""); _Static_assert(__GCC_ATOMIC_LONG_LOCK_FREE == 2, ""); +_Static_assert(__GCC_ATOMIC_LONG_LOCK_FREE == 
__CLANG_ATOMIC_LONG_LOCK_FREE, ""); #ifdef __i386__ _Static_assert(__GCC_ATOMIC_LLONG_LOCK_FREE == 1, ""); #else _Static_assert(__GCC_ATOMIC_LLONG_LOCK_FREE == 2, ""); #endif +_Static_assert(__GCC_ATOMIC_LLONG_LOCK_FREE == __CLANG_ATOMIC_LLONG_LOCK_FREE, ""); _Static_assert(__GCC_ATOMIC_POINTER_LOCK_FREE == 2, ""); +_Static_assert(__GCC_ATOMIC_POINTER_LOCK_FREE == __CLANG_ATOMIC_POINTER_LOCK_FREE, ""); _Static_assert(__c11_atomic_is_lock_free(1), ""); _Static_assert(__c11_atomic_is_lock_free(2), ""); diff --git a/test/Sema/warn-documentation.cpp b/test/Sema/warn-documentation.cpp index 34d8f5f..0c92b2a 100644 --- a/test/Sema/warn-documentation.cpp +++ b/test/Sema/warn-documentation.cpp @@ -1210,3 +1210,75 @@ template T test_function (T arg); /*! @function test_function */ template <> int test_function (int arg); + +namespace AllowParamAndReturnsOnFunctionPointerVars { + +/** + * functionPointerVariable + * + * @param i is integer. + * @returns integer. + */ +int (*functionPointerVariable)(int i); + +struct HasFields { + /** + * functionPointerField + * + * @param i is integer. + * @returns integer. + */ + int (*functionPointerField)(int i); +}; + +// expected-warning@+5 {{'\returns' command used in a comment that is attached to a function returning void}} +/** + * functionPointerVariable + * + * \param p not here. + * \returns integer. + */ +void (*functionPointerVariableThatLeadsNowhere)(); + +// Still warn about param/returns commands for variables that don't specify +// the type directly: + +/** + * FunctionPointerTypedef + * + * \param i is integer. + * \returns integer. + */ +typedef int (*FunctionPointerTypedef)(int i); + +/** + * FunctionPointerTypealias + * + * \param i is integer. + * \returns integer. 
+ */ +using FunctionPointerTypealias = int (*)(int i); + +// expected-warning@+5 {{'@param' command used in a comment that is not attached to a function declaration}} +// expected-warning@+5 {{'@returns' command used in a comment that is not attached to a function or method declaration}} +/** + * functionPointerVariable + * + * @param i is integer. + * @returns integer. + */ +FunctionPointerTypedef functionPointerTypedefVariable; + +struct HasMoreFields { + // expected-warning@+5 {{'\param' command used in a comment that is not attached to a function declaration}} + // expected-warning@+5 {{'\returns' command used in a comment that is not attached to a function or method declaration}} + /** + * functionPointerTypealiasField + * + * \param i is integer. + * \returns integer. + */ + FunctionPointerTypealias functionPointerTypealiasField; +}; + +} diff --git a/test/Sema/warn-documentation.m b/test/Sema/warn-documentation.m index 5e95e2a..3c1a369 100644 --- a/test/Sema/warn-documentation.m +++ b/test/Sema/warn-documentation.m @@ -229,3 +229,73 @@ int FooBar(); - (void) VarArgMeth : (id)arg, ... {} @end +/** + * blockPointerVariable + * + * @param i is integer. + * @returns integer. + */ +int (^blockPointerVariable)(int i); + +struct HasFields { + /** + * blockPointerField + * + * \param i is integer. + * \returns integer. + */ + int (^blockPointerFields)(int i); +}; + +// expected-warning@+5 {{'\returns' command used in a comment that is attached to a function returning void}} +/** + * functionPointerVariable + * + * \param p not here. + * \returns integer. + */ +void (^_Nullable blockPointerVariableThatLeadsNowhere)(); + +@interface CheckFunctionBlockPointerVars { + /** + * functionPointerIVar + * + * @param i is integer. + * @returns integer. + */ + int (*functionPointerIVar)(int i); + + /** + * blockPointerIVar + * + * \param i is integer. + * \returns integer. + */ + int (^blockPointerIVar)(int i); +} + +/** + * functionPointerProperty + * + * @param i is integer. 
+ * @returns integer. + */ +@property int (*functionPointerProperty)(int i); + +/** + * blockPointerProperty + * + * \param i is integer. + * \returns integer. + */ +@property int (^blockPointerProperty)(int i); + +/** + * blockReturnsNothing + * + * \returns Nothing, but can allow this as this pattern is used to document the + * value that the property getter returns. + */ +@property void (^blockReturnsNothing)(); + +@end diff --git a/test/SemaCXX/MicrosoftCompatibility-cxx98.cpp b/test/SemaCXX/MicrosoftCompatibility-cxx98.cpp deleted file mode 100644 index 3b80d09..0000000 --- a/test/SemaCXX/MicrosoftCompatibility-cxx98.cpp +++ /dev/null @@ -1,23 +0,0 @@ -// RUN: %clang_cc1 %s -triple i686-pc-win32 -fsyntax-only -std=c++98 -Wmicrosoft -verify -fms-compatibility -fexceptions -fcxx-exceptions - - -//MSVC allows forward enum declaration -enum ENUM; // expected-warning {{forward references to 'enum' types are a Microsoft extension}} -ENUM *var = 0; -ENUM var2 = (ENUM)3; -enum ENUM1* var3 = 0;// expected-warning {{forward references to 'enum' types are a Microsoft extension}} - -typedef void (*FnPtrTy)(); -void (*PR23733_1)() = static_cast((void *)0); // expected-warning {{static_cast between pointer-to-function and pointer-to-object is a Microsoft extension}} -void (*PR23733_2)() = FnPtrTy((void *)0); -void (*PR23733_3)() = (FnPtrTy)((void *)0); -void (*PR23733_4)() = reinterpret_cast((void *)0); - -long function_prototype(int a); -long (*function_ptr)(int a); - -void function_to_voidptr_conv() { - void *a1 = function_prototype; // expected-warning {{implicit conversion between pointer-to-function and pointer-to-object is a Microsoft extension}} - void *a2 = &function_prototype; // expected-warning {{implicit conversion between pointer-to-function and pointer-to-object is a Microsoft extension}} - void *a3 = function_ptr; // expected-warning {{implicit conversion between pointer-to-function and pointer-to-object is a Microsoft extension}} -} diff --git 
a/test/SemaCXX/MicrosoftCompatibility.cpp b/test/SemaCXX/MicrosoftCompatibility.cpp index 26cd782..d095f6e 100644 --- a/test/SemaCXX/MicrosoftCompatibility.cpp +++ b/test/SemaCXX/MicrosoftCompatibility.cpp @@ -224,6 +224,15 @@ template void function_missing_typename(const D::Type param); } +//MSVC allows forward enum declaration +enum ENUM; // expected-warning {{forward references to 'enum' types are a Microsoft extension}} +ENUM *var = 0; +ENUM var2 = (ENUM)3; +enum ENUM1* var3 = 0;// expected-warning {{forward references to 'enum' types are a Microsoft extension}} + +enum ENUM1 { kA }; +enum ENUM1; // This way round is fine. + enum ENUM2 { ENUM2_a = (enum ENUM2) 4, ENUM2_b = 0x9FFFFFFF, // expected-warning {{enumerator value is not representable in the underlying type 'int'}} @@ -269,3 +278,18 @@ void g() { f(0xffffffffffffffffi64); } } + +typedef void (*FnPtrTy)(); +void (*PR23733_1)() = static_cast((void *)0); // expected-warning {{static_cast between pointer-to-function and pointer-to-object is a Microsoft extension}} +void (*PR23733_2)() = FnPtrTy((void *)0); +void (*PR23733_3)() = (FnPtrTy)((void *)0); +void (*PR23733_4)() = reinterpret_cast((void *)0); + +long function_prototype(int a); +long (*function_ptr)(int a); + +void function_to_voidptr_conv() { + void *a1 = function_prototype; // expected-warning {{implicit conversion between pointer-to-function and pointer-to-object is a Microsoft extension}} + void *a2 = &function_prototype; // expected-warning {{implicit conversion between pointer-to-function and pointer-to-object is a Microsoft extension}} + void *a3 = function_ptr; // expected-warning {{implicit conversion between pointer-to-function and pointer-to-object is a Microsoft extension}} +} diff --git a/test/SemaCXX/MicrosoftExtensions.cpp b/test/SemaCXX/MicrosoftExtensions.cpp index 96088a0..38949e1 100644 --- a/test/SemaCXX/MicrosoftExtensions.cpp +++ b/test/SemaCXX/MicrosoftExtensions.cpp @@ -2,6 +2,7 @@ // RUN: %clang_cc1 -std=c++98 %s -triple 
i686-pc-win32 -fsyntax-only -Wmicrosoft -Wc++11-extensions -Wno-long-long -verify -fms-extensions -fexceptions -fcxx-exceptions -DTEST1 // RUN: %clang_cc1 -std=c++11 %s -triple i686-pc-win32 -fsyntax-only -Wmicrosoft -Wc++11-extensions -Wno-long-long -verify -fms-extensions -fexceptions -fcxx-exceptions -DTEST1 // RUN: %clang_cc1 %s -triple i686-pc-win32 -fsyntax-only -Wmicrosoft -Wc++11-extensions -Wno-long-long -verify -fexceptions -fcxx-exceptions -DTEST2 +// RUN: %clang_cc1 %s -triple i686-pc-win32 -fsyntax-only -std=c++11 -fms-compatibility -verify -DTEST3 #if TEST1 @@ -508,6 +509,13 @@ int S::fn() { return 0; } // expected-warning {{is missing exception specificati // Check that __unaligned is not recognized if MS extensions are not enabled typedef char __unaligned *aligned_type; // expected-error {{expected ';' after top level declarator}} +#elif TEST3 + +namespace PR32750 { +template struct A {}; +template struct B : A> { A::C::D d; }; // expected-error {{missing 'typename' prior to dependent type name 'A::C::D'}} +} + #else #error Unknown test mode diff --git a/test/SemaCXX/modules-ts.cppm b/test/SemaCXX/modules-ts.cppm index 71c09d3..16695f6 100644 --- a/test/SemaCXX/modules-ts.cppm +++ b/test/SemaCXX/modules-ts.cppm @@ -1,22 +1,21 @@ // RUN: %clang_cc1 -std=c++1z -fmodules-ts -emit-module-interface %s -o %t.pcm -verify -DTEST=0 -// RUN: %clang_cc1 -std=c++1z -fmodules-ts -emit-module-interface %s -o %t.pcm -verify -Dmodule=int -DTEST=1 -// RUN: not %clang_cc1 -std=c++1z -fmodules-ts -emit-module-interface %s -fmodule-file=%t.pcm -o %t.pcm -DTEST=2 2>&1 | FileCheck %s --check-prefix=CHECK-2 +// RUN: %clang_cc1 -std=c++1z -fmodules-ts -emit-module-interface %s -o %t.pcm -verify -DTEST=1 +// RUN: %clang_cc1 -std=c++1z -fmodules-ts -emit-module-interface %s -fmodule-file=%t.pcm -o %t.pcm -verify -DTEST=2 // RUN: %clang_cc1 -std=c++1z -fmodules-ts -emit-module-interface %s -fmodule-file=%t.pcm -o %t.pcm -verify -Dfoo=bar -DTEST=3 #if TEST == 0 // 
expected-no-diagnostics #endif -module foo; -#if TEST == 1 -// expected-error@-2 {{expected module declaration at start of module interface}} -#elif TEST == 2 -// CHECK-2: error: redefinition of module 'foo' +export module foo; +#if TEST == 2 +// expected-error@-2 {{redefinition of module 'foo'}} +// expected-note@modules-ts.cppm:* {{loaded from}} #endif static int m; // ok, internal linkage, so no redefinition error int n; -#if TEST == 3 +#if TEST >= 2 // expected-error@-2 {{redefinition of '}} // expected-note@-3 {{previous}} #endif diff --git a/test/SemaObjC/unguarded-availability.m b/test/SemaObjC/unguarded-availability.m index 4369a0e..ae921f4 100644 --- a/test/SemaObjC/unguarded-availability.m +++ b/test/SemaObjC/unguarded-availability.m @@ -1,5 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-apple-macosx-10.9 -Wunguarded-availability -fblocks -fsyntax-only -verify %s -// RUN: %clang_cc1 -xobjective-c++ -DOBJCPP -triple x86_64-apple-macosx-10.9 -Wunguarded-availability -fblocks -fsyntax-only -verify %s +// RUN: %clang_cc1 -xobjective-c++ -std=c++11 -DOBJCPP -triple x86_64-apple-macosx-10.9 -Wunguarded-availability -fblocks -fsyntax-only -verify %s #define AVAILABLE_10_0 __attribute__((availability(macos, introduced = 10.0))) #define AVAILABLE_10_11 __attribute__((availability(macos, introduced = 10.11))) @@ -8,9 +8,9 @@ int func_10_11() AVAILABLE_10_11; // expected-note 4 {{'func_10_11' has been explicitly marked partial here}} #ifdef OBJCPP -// expected-note@+2 {{marked partial here}} +// expected-note@+2 2 {{marked partial here}} #endif -int func_10_12() AVAILABLE_10_12; // expected-note 5 {{'func_10_12' has been explicitly marked partial here}} +int func_10_12() AVAILABLE_10_12; // expected-note 6 {{'func_10_12' has been explicitly marked partial here}} int func_10_0() AVAILABLE_10_0; @@ -129,6 +129,12 @@ void test_params(int_10_12 x); // expected-warning {{'int_10_12' is partial: int void test_params2(int_10_12 x) AVAILABLE_10_12; // no warn +void 
(^topLevelBlockDecl)() = ^ { + func_10_12(); // expected-warning{{'func_10_12' is only available on macOS 10.12 or newer}} expected-note{{@available}} + if (@available(macos 10.12, *)) + func_10_12(); +}; + #ifdef OBJCPP int f(char) AVAILABLE_10_12; @@ -176,4 +182,10 @@ int instantiate_availability() { with_availability_attr(); // expected-warning{{'with_availability_attr' is only available on macOS 10.11 or newer}} expected-warning{{'int_10_12' is only available on macOS 10.12 or newer}} expected-note 2 {{enclose}} } +auto topLevelLambda = [] () { + func_10_12(); // expected-warning{{'func_10_12' is only available on macOS 10.12 or newer}} expected-note{{@available}} + if (@available(macos 10.12, *)) + func_10_12(); +}; + #endif diff --git a/test/SemaObjCXX/pr32725.mm b/test/SemaObjCXX/pr32725.mm new file mode 100644 index 0000000..8d7d116 --- /dev/null +++ b/test/SemaObjCXX/pr32725.mm @@ -0,0 +1,9 @@ +// RUN: %clang_cc1 -triple x86_64-apple-macosx -fsyntax-only -verify -x objective-c++ %s -o /dev/null +// REQUIRES: asserts + +struct objc_class { + unsigned long long bits; +}; +typedef struct objc_class *Class; +static void f(Class c) { (void)(c->bits & RW_HAS_OVERFLOW_REFCOUNT); } +// expected-error@-1{{use of undeclared identifier 'RW_HAS_OVERFLOW_REFCOUNT}} diff --git a/test/SemaOpenCL/cl20-device-side-enqueue.cl b/test/SemaOpenCL/cl20-device-side-enqueue.cl index 3e87cfc..bafbe44 100644 --- a/test/SemaOpenCL/cl20-device-side-enqueue.cl +++ b/test/SemaOpenCL/cl20-device-side-enqueue.cl @@ -1,12 +1,14 @@ -// RUN: %clang_cc1 %s -cl-std=CL2.0 -triple "spir-unknown-unknown" -verify -pedantic -fsyntax-only -DB32 -// RUN: %clang_cc1 %s -cl-std=CL2.0 -triple "spir64-unknown-unknown" -verify -pedantic -fsyntax-only -Wconversion -DWCONV +// RUN: %clang_cc1 %s -cl-std=CL2.0 -triple "spir-unknown-unknown" -verify -pedantic -fsyntax-only -DB32 -DQUALS= +// RUN: %clang_cc1 %s -cl-std=CL2.0 -triple "spir-unknown-unknown" -verify -pedantic -fsyntax-only -DB32 -DQUALS="const 
volatile" +// RUN: %clang_cc1 %s -cl-std=CL2.0 -triple "spir64-unknown-unknown" -verify -pedantic -fsyntax-only -Wconversion -DWCONV -DQUALS= +// RUN: %clang_cc1 %s -cl-std=CL2.0 -triple "spir64-unknown-unknown" -verify -pedantic -fsyntax-only -Wconversion -DWCONV -DQUALS="const volatile" typedef struct {int a;} ndrange_t; // Diagnostic tests for different overloads of enqueue_kernel from Table 6.13.17.1 of OpenCL 2.0 Spec. kernel void enqueue_kernel_tests() { queue_t default_queue; unsigned flags = 0; - ndrange_t ndrange; + QUALS ndrange_t ndrange; clk_event_t evt; clk_event_t event_wait_list; clk_event_t event_wait_list2[] = {evt, evt}; diff --git a/tools/c-index-test/c-index-test.c b/tools/c-index-test/c-index-test.c index 0dde4c7..e8763ff 100644 --- a/tools/c-index-test/c-index-test.c +++ b/tools/c-index-test/c-index-test.c @@ -3014,6 +3014,7 @@ static const char *getEntityLanguageString(CXIdxEntityLanguage kind) { case CXIdxEntityLang_C: return "C"; case CXIdxEntityLang_ObjC: return "ObjC"; case CXIdxEntityLang_CXX: return "C++"; + case CXIdxEntityLang_Swift: return "Swift"; } assert(0 && "Garbage language kind"); return 0; diff --git a/tools/clang-format/ClangFormat.cpp b/tools/clang-format/ClangFormat.cpp index ac0d0a8..14bff19 100644 --- a/tools/clang-format/ClangFormat.cpp +++ b/tools/clang-format/ClangFormat.cpp @@ -276,14 +276,17 @@ static bool format(StringRef FileName) { } // Get new affected ranges after sorting `#includes`. Ranges = tooling::calculateRangesAfterReplacements(Replaces, Ranges); - bool IncompleteFormat = false; + FormattingAttemptStatus Status; Replacements FormatChanges = reformat(*FormatStyle, *ChangedCode, Ranges, - AssumedFileName, &IncompleteFormat); + AssumedFileName, &Status); Replaces = Replaces.merge(FormatChanges); if (OutputXML) { outs() << "\n\n"; + << (Status.FormatComplete ? 
"false" : "true") << "'"; + if (!Status.FormatComplete) + outs() << " line=" << Status.Line; + outs() << ">\n"; if (Cursor.getNumOccurrences() != 0) outs() << "" << FormatChanges.getShiftedCodePosition(CursorPosition) @@ -307,11 +310,15 @@ static bool format(StringRef FileName) { if (Rewrite.overwriteChangedFiles()) return true; } else { - if (Cursor.getNumOccurrences() != 0) + if (Cursor.getNumOccurrences() != 0) { outs() << "{ \"Cursor\": " << FormatChanges.getShiftedCodePosition(CursorPosition) << ", \"IncompleteFormat\": " - << (IncompleteFormat ? "true" : "false") << " }\n"; + << (Status.FormatComplete ? "false" : "true"); + if (!Status.FormatComplete) + outs() << ", \"Line\": " << Status.Line; + outs() << " }\n"; + } Rewrite.getEditBuffer(ID).write(outs()); } } diff --git a/tools/clang-format/git-clang-format b/tools/clang-format/git-clang-format index 74fd451..3d1ba8a 100755 --- a/tools/clang-format/git-clang-format +++ b/tools/clang-format/git-clang-format @@ -23,6 +23,7 @@ git clang-format -h Requires Python 2.7 """ +from __future__ import print_function import argparse import collections import contextlib @@ -138,15 +139,15 @@ def main(): if opts.verbose >= 1: ignored_files.difference_update(changed_lines) if ignored_files: - print 'Ignoring changes in the following files (wrong extension):' + print('Ignoring changes in the following files (wrong extension):') for filename in ignored_files: - print ' ', filename + print(' %s' % filename) if changed_lines: - print 'Running clang-format on the following files:' + print('Running clang-format on the following files:') for filename in changed_lines: - print ' ', filename + print(' %s' % filename) if not changed_lines: - print 'no modified files to format' + print('no modified files to format') return # The computed diff outputs absolute paths, so we must cd before accessing # those files. 
@@ -163,20 +164,20 @@ def main(): binary=opts.binary, style=opts.style) if opts.verbose >= 1: - print 'old tree:', old_tree - print 'new tree:', new_tree + print('old tree: %s' % old_tree) + print('new tree: %s' % new_tree) if old_tree == new_tree: if opts.verbose >= 0: - print 'clang-format did not modify any files' + print('clang-format did not modify any files') elif opts.diff: print_diff(old_tree, new_tree) else: changed_files = apply_changes(old_tree, new_tree, force=opts.force, patch_mode=opts.patch) if (opts.verbose >= 0 and not opts.patch) or opts.verbose >= 1: - print 'changed files:' + print('changed files:') for filename in changed_files: - print ' ', filename + print(' %s' % filename) def load_git_config(non_string_options=None): @@ -320,7 +321,7 @@ def filter_by_extension(dictionary, allowed_extensions): `allowed_extensions` must be a collection of lowercase file extensions, excluding the period.""" allowed_extensions = frozenset(allowed_extensions) - for filename in dictionary.keys(): + for filename in list(dictionary.keys()): base_ext = filename.rsplit('.', 1) if len(base_ext) == 1 or base_ext[1].lower() not in allowed_extensions: del dictionary[filename] @@ -344,8 +345,13 @@ def run_clang_format_and_save_to_tree(changed_lines, revision=None, """Run clang-format on each file and save the result to a git tree. 
Returns the object ID (SHA-1) of the created tree.""" + def iteritems(container): + try: + return container.iteritems() # Python 2 + except AttributeError: + return container.items() # Python 3 def index_info_generator(): - for filename, line_ranges in changed_lines.iteritems(): + for filename, line_ranges in iteritems(changed_lines): if revision: git_metadata_cmd = ['git', 'ls-tree', '%s:%s' % (revision, os.path.dirname(filename)), @@ -356,6 +362,9 @@ def run_clang_format_and_save_to_tree(changed_lines, revision=None, mode = oct(int(stdout.split()[0], 8)) else: mode = oct(os.stat(filename).st_mode) + # Adjust python3 octal format so that it matches what git expects + if mode.startswith('0o'): + mode = '0' + mode[2:] blob_id = clang_format_to_blob(filename, line_ranges, revision=revision, binary=binary, @@ -488,10 +497,10 @@ def apply_changes(old_tree, new_tree, force=False, patch_mode=False): if not force: unstaged_files = run('git', 'diff-files', '--name-status', *changed_files) if unstaged_files: - print >>sys.stderr, ('The following files would be modified but ' - 'have unstaged changes:') - print >>sys.stderr, unstaged_files - print >>sys.stderr, 'Please commit, stage, or stash them first.' 
+ print('The following files would be modified but ' + 'have unstaged changes:', file=sys.stderr) + print(unstaged_files, file=sys.stderr) + print('Please commit, stage, or stash them first.', file=sys.stderr) sys.exit(2) if patch_mode: # In patch mode, we could just as well create an index from the new tree @@ -521,20 +530,20 @@ def run(*args, **kwargs): if p.returncode == 0: if stderr: if verbose: - print >>sys.stderr, '`%s` printed to stderr:' % ' '.join(args) - print >>sys.stderr, stderr.rstrip() + print('`%s` printed to stderr:' % ' '.join(args), file=sys.stderr) + print(stderr.rstrip(), file=sys.stderr) if strip: stdout = stdout.rstrip('\r\n') return stdout if verbose: - print >>sys.stderr, '`%s` returned %s' % (' '.join(args), p.returncode) + print('`%s` returned %s' % (' '.join(args), p.returncode), file=sys.stderr) if stderr: - print >>sys.stderr, stderr.rstrip() + print(stderr.rstrip(), file=sys.stderr) sys.exit(2) def die(message): - print >>sys.stderr, 'error:', message + print('error:', message, file=sys.stderr) sys.exit(2) diff --git a/tools/libclang/CXIndexDataConsumer.cpp b/tools/libclang/CXIndexDataConsumer.cpp index 5d9776b..9cd5ff4 100644 --- a/tools/libclang/CXIndexDataConsumer.cpp +++ b/tools/libclang/CXIndexDataConsumer.cpp @@ -1315,6 +1315,7 @@ static CXIdxEntityLanguage getEntityLangFromSymbolLang(SymbolLanguage L) { case SymbolLanguage::C: return CXIdxEntityLang_C; case SymbolLanguage::ObjC: return CXIdxEntityLang_ObjC; case SymbolLanguage::CXX: return CXIdxEntityLang_CXX; + case SymbolLanguage::Swift: return CXIdxEntityLang_Swift; } llvm_unreachable("invalid symbol language"); } diff --git a/tools/libclang/CXType.cpp b/tools/libclang/CXType.cpp index 16e993e..fce7ef2 100644 --- a/tools/libclang/CXType.cpp +++ b/tools/libclang/CXType.cpp @@ -147,9 +147,6 @@ static inline CXTranslationUnit GetTU(CXType CT) { static Optional> GetTemplateArguments(QualType Type) { assert(!Type.isNull()); - if (const auto *Specialization = Type->getAs()) - 
return Specialization->template_arguments(); - if (const auto *RecordDecl = Type->getAsCXXRecordDecl()) { const auto *TemplateDecl = dyn_cast(RecordDecl); @@ -157,6 +154,9 @@ GetTemplateArguments(QualType Type) { return TemplateDecl->getTemplateArgs().asArray(); } + if (const auto *Specialization = Type->getAs()) + return Specialization->template_arguments(); + return None; } diff --git a/unittests/Format/CleanupTest.cpp b/unittests/Format/CleanupTest.cpp index 6ac5f69..e554a81 100644 --- a/unittests/Format/CleanupTest.cpp +++ b/unittests/Format/CleanupTest.cpp @@ -292,7 +292,7 @@ protected: } inline std::string apply(StringRef Code, - const tooling::Replacements Replaces) { + const tooling::Replacements &Replaces) { auto CleanReplaces = cleanupAroundReplacements(Code, Replaces, Style); EXPECT_TRUE(static_cast(CleanReplaces)) << llvm::toString(CleanReplaces.takeError()) << "\n"; @@ -302,8 +302,7 @@ protected: } inline std::string formatAndApply(StringRef Code, - const tooling::Replacements Replaces) { - + const tooling::Replacements &Replaces) { auto CleanReplaces = cleanupAroundReplacements(Code, Replaces, Style); EXPECT_TRUE(static_cast(CleanReplaces)) << llvm::toString(CleanReplaces.takeError()) << "\n"; diff --git a/unittests/Format/FormatTest.cpp b/unittests/Format/FormatTest.cpp index e6ba223..9b833a9 100644 --- a/unittests/Format/FormatTest.cpp +++ b/unittests/Format/FormatTest.cpp @@ -30,24 +30,25 @@ FormatStyle getGoogleStyle() { return getGoogleStyle(FormatStyle::LK_Cpp); } class FormatTest : public ::testing::Test { protected: - enum IncompleteCheck { - IC_ExpectComplete, - IC_ExpectIncomplete, - IC_DoNotCheck + enum StatusCheck { + SC_ExpectComplete, + SC_ExpectIncomplete, + SC_DoNotCheck }; std::string format(llvm::StringRef Code, const FormatStyle &Style = getLLVMStyle(), - IncompleteCheck CheckIncomplete = IC_ExpectComplete) { + StatusCheck CheckComplete = SC_ExpectComplete) { DEBUG(llvm::errs() << "---\n"); DEBUG(llvm::errs() << Code << "\n\n"); 
std::vector Ranges(1, tooling::Range(0, Code.size())); - bool IncompleteFormat = false; + FormattingAttemptStatus Status; tooling::Replacements Replaces = - reformat(Style, Code, Ranges, "", &IncompleteFormat); - if (CheckIncomplete != IC_DoNotCheck) { - bool ExpectedIncompleteFormat = CheckIncomplete == IC_ExpectIncomplete; - EXPECT_EQ(ExpectedIncompleteFormat, IncompleteFormat) << Code << "\n\n"; + reformat(Style, Code, Ranges, "", &Status); + if (CheckComplete != SC_DoNotCheck) { + bool ExpectedCompleteFormat = CheckComplete == SC_ExpectComplete; + EXPECT_EQ(ExpectedCompleteFormat, Status.FormatComplete) + << Code << "\n\n"; } ReplacementCount = Replaces.size(); auto Result = applyAllReplacements(Code, Replaces); @@ -83,7 +84,7 @@ protected: void verifyIncompleteFormat(llvm::StringRef Code, const FormatStyle &Style = getLLVMStyle()) { EXPECT_EQ(Code.str(), - format(test::messUp(Code), Style, IC_ExpectIncomplete)); + format(test::messUp(Code), Style, SC_ExpectIncomplete)); } void verifyGoogleFormat(llvm::StringRef Code) { @@ -98,7 +99,7 @@ protected: /// \brief Verify that clang-format does not crash on the given input. void verifyNoCrash(llvm::StringRef Code, const FormatStyle &Style = getLLVMStyle()) { - format(Code, Style, IC_DoNotCheck); + format(Code, Style, SC_DoNotCheck); } int ReplacementCount; @@ -5220,6 +5221,12 @@ TEST_F(FormatTest, FormatsFunctionTypes) { verifyFormat("void f() { function(*some_pointer_var)[0] = 10; }"); verifyFormat("int x = f(&h)();"); verifyFormat("returnsFunction(¶m1, ¶m2)(param);"); + verifyFormat("std::function<\n" + " LooooooooooongTemplatedType<\n" + " SomeType>*(\n" + " LooooooooooooooooongType type)>\n" + " function;", + getGoogleStyleWithColumns(40)); } TEST_F(FormatTest, FormatsPointersToArrayTypes) { @@ -6189,7 +6196,7 @@ TEST_F(FormatTest, SkipsDeeplyNestedLines) { // Deeply nested part is untouched, rest is formatted. 
EXPECT_EQ(std::string("int i;\n") + Code + "int j;\n", format(std::string("int i;\n") + Code + "int j;\n", - getLLVMStyle(), IC_ExpectIncomplete)); + getLLVMStyle(), SC_ExpectIncomplete)); } //===----------------------------------------------------------------------===// diff --git a/unittests/Format/FormatTestComments.cpp b/unittests/Format/FormatTestComments.cpp index e631015..4238efe 100644 --- a/unittests/Format/FormatTestComments.cpp +++ b/unittests/Format/FormatTestComments.cpp @@ -29,24 +29,25 @@ FormatStyle getGoogleStyle() { return getGoogleStyle(FormatStyle::LK_Cpp); } class FormatTestComments : public ::testing::Test { protected: - enum IncompleteCheck { - IC_ExpectComplete, - IC_ExpectIncomplete, - IC_DoNotCheck + enum StatusCheck { + SC_ExpectComplete, + SC_ExpectIncomplete, + SC_DoNotCheck }; std::string format(llvm::StringRef Code, const FormatStyle &Style = getLLVMStyle(), - IncompleteCheck CheckIncomplete = IC_ExpectComplete) { + StatusCheck CheckComplete = SC_ExpectComplete) { DEBUG(llvm::errs() << "---\n"); DEBUG(llvm::errs() << Code << "\n\n"); std::vector Ranges(1, tooling::Range(0, Code.size())); - bool IncompleteFormat = false; + FormattingAttemptStatus Status; tooling::Replacements Replaces = - reformat(Style, Code, Ranges, "", &IncompleteFormat); - if (CheckIncomplete != IC_DoNotCheck) { - bool ExpectedIncompleteFormat = CheckIncomplete == IC_ExpectIncomplete; - EXPECT_EQ(ExpectedIncompleteFormat, IncompleteFormat) << Code << "\n\n"; + reformat(Style, Code, Ranges, "", &Status); + if (CheckComplete != SC_DoNotCheck) { + bool ExpectedCompleteFormat = CheckComplete == SC_ExpectComplete; + EXPECT_EQ(ExpectedCompleteFormat, Status.FormatComplete) + << Code << "\n\n"; } ReplacementCount = Replaces.size(); auto Result = applyAllReplacements(Code, Replaces); @@ -73,7 +74,7 @@ protected: /// \brief Verify that clang-format does not crash on the given input. 
void verifyNoCrash(llvm::StringRef Code, const FormatStyle &Style = getLLVMStyle()) { - format(Code, Style, IC_DoNotCheck); + format(Code, Style, SC_DoNotCheck); } int ReplacementCount; diff --git a/unittests/Format/FormatTestJS.cpp b/unittests/Format/FormatTestJS.cpp index d8fa8e4..7886c4f 100644 --- a/unittests/Format/FormatTestJS.cpp +++ b/unittests/Format/FormatTestJS.cpp @@ -24,10 +24,10 @@ protected: DEBUG(llvm::errs() << "---\n"); DEBUG(llvm::errs() << Code << "\n\n"); std::vector Ranges(1, tooling::Range(Offset, Length)); - bool IncompleteFormat = false; + FormattingAttemptStatus Status; tooling::Replacements Replaces = - reformat(Style, Code, Ranges, "", &IncompleteFormat); - EXPECT_FALSE(IncompleteFormat); + reformat(Style, Code, Ranges, "", &Status); + EXPECT_TRUE(Status.FormatComplete); auto Result = applyAllReplacements(Code, Replaces); EXPECT_TRUE(static_cast(Result)); DEBUG(llvm::errs() << "\n" << *Result << "\n\n"); @@ -318,6 +318,25 @@ TEST_F(FormatTestJS, MethodsInObjectLiterals) { "};"); } +TEST_F(FormatTestJS, GettersSettersVisibilityKeywords) { + // Don't break after "protected" + verifyFormat("class X {\n" + " protected get getter():\n" + " number {\n" + " return 1;\n" + " }\n" + "}", + getGoogleJSStyleWithColumns(12)); + // Don't break after "get" + verifyFormat("class X {\n" + " protected get someReallyLongGetterName():\n" + " number {\n" + " return 1;\n" + " }\n" + "}", + getGoogleJSStyleWithColumns(40)); +} + TEST_F(FormatTestJS, SpacesInContainerLiterals) { verifyFormat("var arr = [1, 2, 3];"); verifyFormat("f({a: 1, b: 2, c: 3});"); @@ -1220,6 +1239,9 @@ TEST_F(FormatTestJS, MetadataAnnotations) { "}"); verifyFormat("class X {}\n" "class Y {}"); + verifyFormat("class X {\n" + " @property() private isReply = false;\n" + "}\n"); } TEST_F(FormatTestJS, TypeAliases) { diff --git a/unittests/Format/FormatTestObjC.cpp b/unittests/Format/FormatTestObjC.cpp index 680ff92..d2dc8bc 100644 --- a/unittests/Format/FormatTestObjC.cpp +++ 
b/unittests/Format/FormatTestObjC.cpp @@ -33,23 +33,24 @@ protected: Style.Language = FormatStyle::LK_ObjC; } - enum IncompleteCheck { - IC_ExpectComplete, - IC_ExpectIncomplete, - IC_DoNotCheck + enum StatusCheck { + SC_ExpectComplete, + SC_ExpectIncomplete, + SC_DoNotCheck }; std::string format(llvm::StringRef Code, - IncompleteCheck CheckIncomplete = IC_ExpectComplete) { + StatusCheck CheckComplete = SC_ExpectComplete) { DEBUG(llvm::errs() << "---\n"); DEBUG(llvm::errs() << Code << "\n\n"); std::vector Ranges(1, tooling::Range(0, Code.size())); - bool IncompleteFormat = false; + FormattingAttemptStatus Status; tooling::Replacements Replaces = - reformat(Style, Code, Ranges, "", &IncompleteFormat); - if (CheckIncomplete != IC_DoNotCheck) { - bool ExpectedIncompleteFormat = CheckIncomplete == IC_ExpectIncomplete; - EXPECT_EQ(ExpectedIncompleteFormat, IncompleteFormat) << Code << "\n\n"; + reformat(Style, Code, Ranges, "", &Status); + if (CheckComplete != SC_DoNotCheck) { + bool ExpectedCompleteFormat = CheckComplete == SC_ExpectComplete; + EXPECT_EQ(ExpectedCompleteFormat, Status.FormatComplete) + << Code << "\n\n"; } auto Result = applyAllReplacements(Code, Replaces); EXPECT_TRUE(static_cast(Result)); @@ -62,7 +63,7 @@ protected: } void verifyIncompleteFormat(StringRef Code) { - EXPECT_EQ(Code.str(), format(test::messUp(Code), IC_ExpectIncomplete)); + EXPECT_EQ(Code.str(), format(test::messUp(Code), SC_ExpectIncomplete)); } FormatStyle Style; diff --git a/unittests/Format/FormatTestSelective.cpp b/unittests/Format/FormatTestSelective.cpp index 367dbaf..8046d7f 100644 --- a/unittests/Format/FormatTestSelective.cpp +++ b/unittests/Format/FormatTestSelective.cpp @@ -24,10 +24,10 @@ protected: DEBUG(llvm::errs() << "---\n"); DEBUG(llvm::errs() << Code << "\n\n"); std::vector Ranges(1, tooling::Range(Offset, Length)); - bool IncompleteFormat = false; + FormattingAttemptStatus Status; tooling::Replacements Replaces = - reformat(Style, Code, Ranges, "", 
&IncompleteFormat); - EXPECT_FALSE(IncompleteFormat) << Code << "\n\n"; + reformat(Style, Code, Ranges, "", &Status); + EXPECT_TRUE(Status.FormatComplete) << Code << "\n\n"; auto Result = applyAllReplacements(Code, Replaces); EXPECT_TRUE(static_cast(Result)); DEBUG(llvm::errs() << "\n" << *Result << "\n\n"); diff --git a/unittests/Format/NamespaceEndCommentsFixerTest.cpp b/unittests/Format/NamespaceEndCommentsFixerTest.cpp index 912638f..6c2d369 100644 --- a/unittests/Format/NamespaceEndCommentsFixerTest.cpp +++ b/unittests/Format/NamespaceEndCommentsFixerTest.cpp @@ -23,7 +23,7 @@ class NamespaceEndCommentsFixerTest : public ::testing::Test { protected: std::string fixNamespaceEndComments(llvm::StringRef Code, - std::vector Ranges, + const std::vector &Ranges, const FormatStyle &Style = getLLVMStyle()) { DEBUG(llvm::errs() << "---\n"); DEBUG(llvm::errs() << Code << "\n\n");