diff --git a/.clang-tidy b/.clang-tidy
index 7d343ea..616d471 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -1,8 +1,10 @@
 ---
 Checks:
-'clang-diagnostic-*,clang-analyzer-*,performance-*,readability-*,modernize-*,bugprone-*,misc-*,-modernize-use-trailing-return-type'
+'clang-diagnostic-*,clang-analyzer-*,performance-*,readability-*,modernize-*,bugprone-*,misc-*,
+-modernize-use-trailing-return-type,-bugprone-easily-swappable-parameters,-readability-identifier-length'
 WarningsAsErrors: '*'
 HeaderFilterRegex: 'include/aws/.*\.h$'
+ExcludeHeaderFilter: 'build/_deps/gtest-src.*'
 FormatStyle: 'none'
 CheckOptions:
   - key: modernize-pass-by-value.ValuesOnly
diff --git a/.github/workflows/code-quality.yml b/.github/workflows/code-quality.yml
new file mode 100644
index 0000000..495f6a6
--- /dev/null
+++ b/.github/workflows/code-quality.yml
@@ -0,0 +1,58 @@
+name: "Code Quality"
+
+on:
+  push:
+    branches: [ "master" ]
+  pull_request:
+    # The branches below must be a subset of the branches above
+    branches: [ "master" ]
+  schedule:
+    - cron: '32 14 * * 2'
+
+jobs:
+  analyze:
+    name: Analyze
+    runs-on: ubuntu-latest
+    permissions:
+      actions: read
+      contents: read
+      security-events: write
+
+    strategy:
+      fail-fast: false
+      matrix:
+        language: [ 'cpp' ]
+        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
+        # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
+
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v3
+
+    # Install libcurl dependency
+    - name: Install dependencies
+      run: sudo apt-get update && sudo apt install -y libcurl4-openssl-dev
+
+
+    # Initializes the CodeQL tools for scanning.
+    - name: Initialize CodeQL
+      uses: github/codeql-action/init@v2
+      with:
+        languages: ${{ matrix.language }}
+        # If you wish to specify custom queries, you can do so here or in a config file.
+        # By default, queries listed here will override any specified in a config file.
+        # Prefix the list here with "+" to use these queries and those in the config file.
+
+        # For details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
+        # queries: security-extended,security-and-quality
+
+
+    - run: |
+        echo "Run CMake build script"
+        cmake -B ${{github.workspace}}/build -DBUILD_SHARED_LIBS=ON -DBUILD_TESTING=OFF
+
+    - name: Perform CodeQL Analysis
+      uses: github/codeql-action/analyze@v2
+      with:
+        category: "/language:${{matrix.language}}"
+
diff --git a/.github/workflows/workflow.yml b/.github/workflows/workflow.yml
new file mode 100644
index 0000000..7da0628
--- /dev/null
+++ b/.github/workflows/workflow.yml
@@ -0,0 +1,63 @@
+name: Validate Project
+
+on:
+  push:
+    branches: [ master ]
+  pull_request:
+    branches: [ master ]
+
+env:
+  # Customize the CMake build type here (Release, Debug, RelWithDebInfo, etc.)
+ BUILD_TYPE: Debug + +jobs: + build: + strategy: + matrix: + arch: [ubuntu-latest, ubuntu-24.04-arm] + runs-on: ${{ matrix.arch }} + + steps: + - uses: actions/checkout@v3 + - name: Install Dependencies + run: sudo apt-get update && sudo apt-get install -y clang-tidy libcurl4-openssl-dev + + - name: Configure CMake + run: cmake -B ${{github.workspace}}/build -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DCMAKE_CXX_CLANG_TIDY=clang-tidy -DENABLE_TESTS=ON + + - name: Build It + run: cmake --build ${{github.workspace}}/build --config ${{env.BUILD_TYPE}} + + - name: Test It + run: cd build && make && ctest + + build-demo: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Install Dependencies + run: sudo apt-get update && sudo apt-get install -y clang-tidy libcurl4-openssl-dev + + - name: Build and install lambda runtime + run: | + mkdir build && cd build + cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=~/lambda-install + make + make install + + - name: Build and package demo project + run: | + cd examples/demo + mkdir build && cd build + cmake .. -DCMAKE_BUILD_TYPE=Debug -DCMAKE_INSTALL_PREFIX=~/lambda-install + make + make aws-lambda-package-demo + + format: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Check Formatting + run: ./ci/codebuild/format-check.sh diff --git a/CMakeLists.txt b/CMakeLists.txt index b3869a6..09e226f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,11 +1,12 @@ cmake_minimum_required(VERSION 3.9) set(CMAKE_CXX_STANDARD 11) project(aws-lambda-runtime - VERSION 0.2.6 + VERSION 0.0.0 LANGUAGES CXX) -option(ENABLE_LTO "Enables link-time optimization, requires compiler support." ON) +option(ENABLE_LTO "Enables link-time optimization, requires compiler support." OFF) option(ENABLE_TESTS "Enables building the test project, requires AWS C++ SDK." OFF) +option(ENABLE_SANITIZERS "Enables ASan and UBSan." OFF) add_library(${PROJECT_NAME} "src/logging.cpp" @@ -16,12 +17,17 @@ add_library(${PROJECT_NAME} set_target_properties(${PROJECT_NAME} PROPERTIES SOVERSION 0 - VERSION ${PROJECT_VERSION}) + VERSION ${PROJECT_VERSION}-dev) target_include_directories(${PROJECT_NAME} PUBLIC $ $) +if (ENABLE_SANITIZERS) + target_compile_options(${PROJECT_NAME} PUBLIC "-fsanitize=address,undefined") + target_link_libraries(${PROJECT_NAME} PUBLIC "-fsanitize=address,undefined") +endif() + if (ENABLE_LTO) include(CheckIPOSupported) check_ipo_supported(RESULT has_lto OUTPUT lto_check_output) @@ -41,6 +47,30 @@ endif() target_include_directories(${PROJECT_NAME} PRIVATE ${CURL_INCLUDE_DIRS}) +find_package(Backtrace QUIET) +if (${Backtrace_FOUND}) + target_link_libraries(${PROJECT_NAME} PRIVATE ${Backtrace_LIBRARIES}) + + find_library(DW_LIB NAMES dw) + if (NOT DW_LIB STREQUAL DW_LIB-NOTFOUND) + message("-- Enhanced stack-traces are enabled via libdw: ${DW_LIB}") + target_compile_definitions(${PROJECT_NAME} PRIVATE "BACKWARD_HAS_DW=1") + target_link_libraries(${PROJECT_NAME} PUBLIC "${DW_LIB}") + else() + find_library(BFD_LIB NAMES bfd) + if (NOT BFD_LIB STREQUAL BFD_LIB-NOTFOUND) + message("-- Enhanced stack-traces are enabled via libbfd: ${BFD_LIB}") + target_compile_definitions(${PROJECT_NAME} PRIVATE "BACKWARD_HAS_BFD=1") + target_link_libraries(${PROJECT_NAME} PRIVATE "${BFD_LIB}") + endif() + endif() + +else() + message("-- libbacktrace was not installed. 
Stacktracing will be disabled")
+    add_definitions(-Dno_backtrace)
+endif()
+
+
 target_compile_options(${PROJECT_NAME} PRIVATE
     "-fno-exceptions"
     "-fno-rtti"
@@ -52,20 +82,6 @@ target_compile_options(${PROJECT_NAME} PRIVATE
     "-Wconversion"
     "-Wno-sign-conversion")
 
-find_library(DW_LIB NAMES dw)
-if (NOT DW_LIB STREQUAL DW_LIB-NOTFOUND)
-    message("-- Enhanced stack-traces are enabled via libdw: ${DW_LIB}")
-    target_compile_definitions(${PROJECT_NAME} PRIVATE "BACKWARD_HAS_DW=1")
-    target_link_libraries(${PROJECT_NAME} PUBLIC "${DW_LIB}")
-else()
-    find_library(BFD_LIB NAMES bfd)
-    if (NOT BFD_LIB STREQUAL BFD_LIB-NOTFOUND)
-        message("-- Enhanced stack-traces are enabled via libbfd: ${BFD_LIB}")
-        target_compile_definitions(${PROJECT_NAME} PRIVATE "BACKWARD_HAS_BFD=1")
-        target_link_libraries(${PROJECT_NAME} PRIVATE "${BFD_LIB}")
-    endif()
-endif()
-
 if (LOG_VERBOSITY)
     target_compile_definitions(${PROJECT_NAME} PRIVATE "AWS_LAMBDA_LOG=${LOG_VERBOSITY}")
 elseif(CMAKE_BUILD_TYPE STREQUAL Debug)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index e8c3aa5..d6b300a 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -43,6 +43,24 @@ GitHub provides additional document on [forking a repository](https://help.githu
 ## Finding contributions to work on
 Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/aws-lambda-cpp-runtime/labels/help%20wanted) issues is a great place to start.
 
+## Running the Integration Tests Locally
+
+The integration testing for the project creates, invokes, and deletes Lambda functions.
+These tests typically run on AWS CodeBuild, but may also be executed locally.
+
+Prerequisites:
+* install Docker
+* configure AWS credentials with at least permissions to Create, Delete, and Invoke Lambda functions
+* an IAM role, named exactly `integration-tests`, must exist in the account.
+  * The role must also be assumable by Lambda.
+  * (optional) attach the AWSLambdaBasicExecutionRole managed policy to the role, so that the test function logs are saved to CloudWatch
+
+Then, to iterate on a single workflow:
+```
+docker build -t lambda-cpp-amazon-linux-2 -f ./ci/docker/amazon-linux-2 .
+./ci/codebuild_build.sh -c -a /tmp -i lambda-cpp-amazon-linux-2 -b ./ci/codebuild/amazonlinux-2.yml
+```
+
 ## Code of Conduct
 This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
diff --git a/README.md b/README.md index 0812476..0f58b28 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,15 @@ [![GitHub](https://img.shields.io/github/license/awslabs/aws-lambda-cpp.svg)](https://github.com/awslabs/aws-lambda-cpp/blob/master/LICENSE) -![CodeBuild](https://codebuild.us-west-2.amazonaws.com/badges?uuid=eyJlbmNyeXB0ZWREYXRhIjoiQkN1b0srbWtnUjNibFVyL2psNmdaM0l4RnVQNzVBeG84QnQvUjRmOEJVdXdHUXMxZ25iWnFZQUtGTkUxVGJhcGZaVEhXY2JOSTFHTlkvaGF2RDRIZlpVPSIsIml2UGFyYW1ldGVyU3BlYyI6IjRiS3hlRjFxVFZHSWViQmQiLCJtYXRlcmlhbFNldFNlcmlhbCI6MX0%3D&branch=master) -[![Language grade: C/C++](https://img.shields.io/lgtm/grade/cpp/g/awslabs/aws-lambda-cpp.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/awslabs/aws-lambda-cpp/context:cpp) +![Code Quality badge](https://github.com/awslabs/aws-lambda-cpp/actions/workflows/code-quality.yml/badge.svg) + +| OS | Arch | Status | +|----|------|--------| +| Amazon Linux 2 | x86_64 | [![](https://codebuild.us-west-2.amazonaws.com/badges?uuid=eyJlbmNyeXB0ZWREYXRhIjoiQ1EvQXE0ODBLK0VnQitMaVdvZ1J0QkhTMlpNbk8wS0lRbWZvRDlPSHB0V0VXb1VLazdSdzRMWHhMeUdpYjdOT1hCc1hjL3BKei96ZVpzeTdrMVd4c3BRPSIsIml2UGFyYW1ldGVyU3BlYyI6IkhjTTNoSzJwb1hldk9zZFYiLCJtYXRlcmlhbFNldFNlcmlhbCI6MX0%3D&branch=master)](https://us-west-2.codebuild.aws.amazon.com/project/eyJlbmNyeXB0ZWREYXRhIjoicnpvbytDV0grMHh2c09ONi9kQ3ZuOVVwckxISElKRENEVy9CL0pvd3VvLzQwZ21pdzBOdGtNWUFLRy9VRkw1NldSMmRlVXV5R0NhN1k1OWI0bDY1N2MyMzR2SmhseWlma0hmWTlBUkwzcVp0TEJlQm1RPT0iLCJpdlBhcmFtZXRlclNwZWMiOiI3MzM4WDUybk9hSkl1bllRIiwibWF0ZXJpYWxTZXRTZXJpYWwiOjF9) | +| Amazon Linux 2 | aarch64 | [![](https://codebuild.us-west-2.amazonaws.com/badges?uuid=eyJlbmNyeXB0ZWREYXRhIjoicWNGSmJtaGdPSCtqR25KQ1k3RWNZS1pwWlZScGZ3WU1JM0lISnZJVkhVNy8zbVIyVHp6RlBmRjN4cjZJd2xWNEd0eWZmUy9JaE1vRzBYWFcrbnpFdDUwPSIsIml2UGFyYW1ldGVyU3BlYyI6ImVoeHl5TTNtMmdERjJuWisiLCJtYXRlcmlhbFNldFNlcmlhbCI6MX0%3D&branch=master)](https://us-west-2.codebuild.aws.amazon.com/project/eyJlbmNyeXB0ZWREYXRhIjoiVUVaNzBYMXVjUUl1djdlS3pTSXVxMUhKcHB4ZC96ZjlDOWM3bUxiRmtITnVGYzdxTDJveFY3eVFqanpHbzhYRUdWVjVhZFhnOGt0NldETEVMamN0alRoZzYwMyszU1lVMjJNR0lUWGNCQjVYNzhuUzZwZ0ptZz09IiwiaXZQYXJhbWV0ZXJTcGVjIjoicmtKaUVoM2pmUVdibVZuOSIsIm1hdGVyaWFsU2V0U2VyaWFsIjoxfQ%3D%3D) | +| Amazon Linux (ALAMI) | x86_64 | [![](https://codebuild.us-west-2.amazonaws.com/badges?uuid=eyJlbmNyeXB0ZWREYXRhIjoiWUNqeG9FcmUyQzVSaUkydFd6UkU5Sm42cTViSExXOFZURHRBQlM0dDJDOThMWEFYLzN4NitQR0w1ZzNKcjAwOVNUYXY5ZUljU1hzcEtrU0N0dEhUN0M0PSIsIml2UGFyYW1ldGVyU3BlYyI6ImtYU0ZjSzh3ekFKazlBVVUiLCJtYXRlcmlhbFNldFNlcmlhbCI6MX0%3D&branch=master)](https://us-west-2.codebuild.aws.amazon.com/project/eyJlbmNyeXB0ZWREYXRhIjoiTEJJVVFIOXp6VjUvWExqODN1K1NPQmRTVm9iQy9ZK2tmKzkrbVdTNlh1LzV1UlpQL2lPN1Faak0yc0pOaGpEVlRpai9yS3JCRjBRQU5lMVFVU1hRU1hyekxpVi8yNWV0ZE44SElWdlRpNld4bmkwdE1oQjcxN0NtIiwiaXZQYXJhbWV0ZXJTcGVjIjoiZnBBUi9uOU8yVjJ4RENpRyIsIm1hdGVyaWFsU2V0U2VyaWFsIjoxfQ%3D%3D) | +| Alpine | x86_64 | [![](https://codebuild.us-west-2.amazonaws.com/badges?uuid=eyJlbmNyeXB0ZWREYXRhIjoiTkhhOEJGNjVOTG5NZWVNWDNjSGNEdWEwY0J2ZUNLMkE2aU83UVdYc3VMU0V5b1JqdXY0OXUxNkxYRDUxU0VJOTByL3NLUTE3djBMNWh2VldXdk0xamJZPSIsIml2UGFyYW1ldGVyU3BlYyI6ImQxSjc2Vnd3czF2QWphRS8iLCJtYXRlcmlhbFNldFNlcmlhbCI6MX0%3D&branch=master)](https://us-west-2.codebuild.aws.amazon.com/project/eyJlbmNyeXB0ZWREYXRhIjoiQzJVUzZML1dLTkpRNGcxSjVyUXVEd1BCY2poZUhydWZLeGE5MGU1c05vNDVObG44bnpKZFhlZVJKSm50ZnpaalRENUxxOHpPNGdPTDRlTGc4WW81UHd4L3hCeTgyTm5vRVR0RW5FempKdk00aDlPRk02WGQiLCJpdlBhcmFtZXRlclNwZWMiOiJUMFhCQktLMExQMXc3Q0lHIiwibWF0ZXJpYWxTZXRTZXJpYWwiOjF9) | +| Arch Linux | x86_64 | 
[![](https://codebuild.us-west-2.amazonaws.com/badges?uuid=eyJlbmNyeXB0ZWREYXRhIjoib2cxaHp3bE5ndWhWR0RIRkxxQzRwR1dHa05DWmQ0bENnWGNHYzM2YmR3OFRHNWpPYStGYUM1WXBQVUNoZjJRa2xrZVpuRXVyWVVvQVNzNExqSlN5TGEwPSIsIml2UGFyYW1ldGVyU3BlYyI6Ii9zSjVybGNsNEJMUEZwSlUiLCJtYXRlcmlhbFNldFNlcmlhbCI6MX0%3D&branch=master)](https://us-west-2.codebuild.aws.amazon.com/project/eyJlbmNyeXB0ZWREYXRhIjoiRWVOYlA5OHZqUVVLUTZLYlJzZmdOQkR5dmpVSTBPS1h1M3RxQkxXa3pyMC9OOUw5dDJlUDcyYm05Q3pBOEZ1aWJFYkFBajFGZ3RJWUM5WkpoZUE4K0IrdFIvYytyNVRYREpQTUNHL05vTXlLQ0E9PSIsIml2UGFyYW1ldGVyU3BlYyI6InFuS1hJY3JTaWpSWENLM1EiLCJtYXRlcmlhbFNldFNlcmlhbCI6MX0%3D) | +| Ubuntu 18.04 | x86_64 | [![](https://codebuild.us-west-2.amazonaws.com/badges?uuid=eyJlbmNyeXB0ZWREYXRhIjoiVkhsbmdlYkk3M1JESVdiTHc0elpobXEvUk4wRWlBZUpEZzdmem1QbGJRZ3dMbVE2RWZpbHZjNmVCd0dJaUFSZ1pzQVlyZ1dvdndWTjZSRjg0WDRYRFh3PSIsIml2UGFyYW1ldGVyU3BlYyI6IjJic2dnR3ZpTEQyMmRPMXQiLCJtYXRlcmlhbFNldFNlcmlhbCI6MX0%3D&branch=master)](https://us-west-2.codebuild.aws.amazon.com/project/eyJlbmNyeXB0ZWREYXRhIjoiSlNPak1vQmVBR3JnUlAwRWg2N3hHRHF1U2Z6RkQvY1NHRHM4RTJ0WEFBdjFTSzBzY21kZEpPMDk2QXdwRStUWUZmWWFmTkRkU1FGa0lQUGoxbU9GNU45QVJ1YVkzZkY0dmsxV2FRZVljakt3UmJpdTM2a0JnQT09IiwiaXZQYXJhbWV0ZXJTcGVjIjoieE5LSUlmNVN1UWdqbWg0cSIsIm1hdGVyaWFsU2V0U2VyaWFsIjoxfQ%3D%3D) | + ## AWS Lambda C++ Runtime C++ implementation of the lambda runtime API @@ -18,7 +27,7 @@ Make sure you have the following packages installed first: 1. git 1. Make or Ninja 1. zip -1. libcurl-devel (on Debian-basded distros it's libcurl4-openssl-dev) +1. libcurl-devel (on Debian-based distros it's libcurl4-openssl-dev) In a terminal, run the following commands: ```bash @@ -30,6 +39,23 @@ $ cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=~/lambda-install $ make && make install ``` +### Running Unit Tests Locally + +To run the unit tests locally, follow these steps to build: + +```bash +$ cd aws-lambda-cpp +$ mkdir build +$ cd build +$ cmake .. -DCMAKE_BUILD_TYPE=Debug -DENABLE_TESTS=ON +$ make +``` + +Run unit tests: +```bash +$ ctest +``` + To consume this library in a project that is also using CMake, you would do: ```cmake @@ -59,7 +85,7 @@ static invocation_response my_handler(invocation_request const& req) "error type here" /*error_type*/); } - return invocation_response::success("json payload here" /*payload*/, + return invocation_response::success("{\"message:\":\"I fail if body length is bigger than 42!\"}" /*payload*/, "application/json" /*MIME type*/); } @@ -121,13 +147,19 @@ And finally, create the Lambda function: ``` $ aws lambda create-function --function-name demo \ --role \ ---runtime provided --timeout 15 --memory-size 128 \ +--runtime provided.al2023 --timeout 15 --memory-size 128 \ --handler demo --zip-file fileb://demo.zip ``` +> **N.B.** If you are building on `arm64`, you have to explicitly add the param `--architectures arm64`, so that you are setting up the proper architecture on AWS to run your supplied Lambda function. And to invoke the function: ```bash -$ aws lambda invoke --function-name demo --payload '{"answer":42}' output.txt +$ aws lambda invoke --function-name demo --cli-binary-format raw-in-base64-out --payload '{"answer":42}' output.txt +``` + +You can update your supplied function: +```bash +$ aws lambda update-function-code --function-name demo --zip-file fileb://demo.zip ``` ## Using the C++ SDK for AWS with this runtime @@ -141,7 +173,7 @@ Any *fully* compliant C++11 compiler targeting GNU/Linux x86-64 should work. Ple - Use Clang v3.3 or above ## Packaging, ABI, GNU C Library, Oh My! 
-Lambda runs your code on some version of Amazon Linux. It would be a less than ideal customer experience if you are forced to build your application on that platform and that platform only. +Lambda runs your code on some version of Amazon Linux. It would be a less than ideal customer experience if you are forced to build your application on that platform and that platform only. However, the freedom to build on any linux distro brings a challenge. The GNU C Library ABI. There is no guarantee the platform used to build the Lambda function has the same GLIBC version as the one used by AWS Lambda. In fact, you might not even be using GNU's implementation. For example you could build a C++ Lambda function using musl libc. @@ -187,10 +219,12 @@ curl_easy_setopt(curl_handle, CURLOPT_CAINFO, "/etc/pki/tls/certs/ca-bundle.crt" ```bash $ aws lambda create-function --function-name demo \ --role \ - --runtime provided --timeout 15 --memory-size 128 \ + --runtime provided.al2023 --timeout 15 --memory-size 128 \ --handler demo --code "S3Bucket=mys3bucket,S3Key=demo.zip" ``` +> **N.B.** See hint above if you are building on `arm64`. + 1. **My code is crashing, how can I debug it?** - Starting with [v0.2.0](https://github.com/awslabs/aws-lambda-cpp/releases/tag/v0.2.0) you should see a stack-trace of the crash site in the logs (which are typically stored in CloudWatch). diff --git a/ci/README.md b/ci/README.md new file mode 100644 index 0000000..9e50181 --- /dev/null +++ b/ci/README.md @@ -0,0 +1,14 @@ +## AWS CodeBuild Stack Setup + +create or update the stack +``` +aws cloudformation deploy --capabilities CAPABILITY_IAM --stack-name aws-lambda-cpp-ci --template-file codebuild.yml +``` + +(optional) trigger docker build and docker push of the build environment images. +A project to do this is pre-configured in the deployed stack. 
+``` +aws cloudformation describe-stacks --stack-name aws-lambda-cpp-ci --query "Stacks[].Outputs[].OutputValue" +# run command output from above, will look like: +# aws codebuild start-build --project-name +``` diff --git a/ci/codebuild.yml b/ci/codebuild.yml new file mode 100644 index 0000000..1205407 --- /dev/null +++ b/ci/codebuild.yml @@ -0,0 +1,412 @@ + +Parameters: + GitHub: + Type: String + Default: https://github.com/awslabs/aws-lambda-cpp.git + +Resources: + + ECR: + Type: AWS::ECR::Repository + + LambdaTestRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: lambda.amazonaws.com + Action: sts:AssumeRole + Policies: + - PolicyName: can-log + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Resource: + - !Join [':', [ arn:aws:logs, !Ref AWS::Region, !Ref AWS::AccountId, log-group:/aws/lambda/lambda-cpp-* ] ] + - !Join [':', [ arn:aws:logs, !Ref AWS::Region, !Ref AWS::AccountId, log-group:/aws/lambda/lambda-cpp-*:* ] ] + Action: + - logs:CreateLogGroup + - logs:CreateLogStream + - logs:PutLogEvents + + LogsAccessRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Principal: + Service: codebuild.amazonaws.com + Action: + - sts:AssumeRole + Policies: + - PolicyName: readthelogs + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Resource: + - !Join [':', [ arn:aws:logs, !Ref AWS::Region, !Ref AWS::AccountId, log-group:/aws/codebuild/aws-lambda-cpp-ci:* ] ] + Action: + - logs:GetLogEvents + + CodeBuildRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: codebuild.amazonaws.com + Action: sts:AssumeRole + Policies: + - PolicyName: thepolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Resource: + - !Join [':', [ arn:aws:logs, !Ref AWS::Region, !Ref AWS::AccountId, log-group:/aws/codebuild/aws-lambda-cpp-ci ] ] + - !Join [':', [ arn:aws:logs, !Ref AWS::Region, !Ref AWS::AccountId, log-group:/aws/codebuild/aws-lambda-cpp-ci:* ] ] + Action: + - logs:CreateLogGroup + - logs:CreateLogStream + - logs:PutLogEvents + - Effect: Allow + Resource: + - !Join [ '', [ arn:aws:s3:::codepipeline-, !Ref AWS::Region, -* ] ] + Action: + - s3:PutObject + - s3:GetObject + - s3:GetObjectVersion + - s3:GetBucketAcl + - s3:GetBucketLocation + - Effect: Allow + Resource: + - !Join [ ':', [ arn:aws:codebuild, !Ref AWS::Region, !Ref AWS::AccountId, report-group/test-* ] ] + Action: + - codebuild:CreateReportGroup + - codebuild:CreateReport + - codebuild:UpdateReport + - codebuild:BatchPutTestCases + - codebuild:BatchPutCodeCoverages + - Effect: Allow + Resource: + - '*' + Action: + - ecr:GetAuthorizationToken + - Effect: Allow + Resource: + - !GetAtt ECR.Arn + Action: + # pulling + - ecr:BatchCheckLayerAvailability + - ecr:GetDownloadUrlForLayer + - ecr:BatchGetImage + # pushing + - ecr:CompleteLayerUpload + - ecr:GetAuthorizationToken + - ecr:UploadLayerPart + - ecr:InitiateLayerUpload + - ecr:BatchCheckLayerAvailability + - ecr:PutImage + - Effect: Allow + Resource: + - !GetAtt LambdaTestRole.Arn + Action: + - iam:GetRole + - iam:PassRole + - Effect: Allow + Resource: + - !Join [':', [ arn:aws:lambda, !Ref AWS::Region, !Ref AWS::AccountId, function:lambda-cpp-* ] ] + Action: + - lambda:CreateFunction + - lambda:DeleteFunction + - lambda:InvokeFunction + + 
UpdateArmBuildEnvironments: + Type: AWS::CodeBuild::Project + Properties: + Artifacts: + Type: NO_ARTIFACTS + BadgeEnabled: True + Visibility: PUBLIC_READ + ConcurrentBuildLimit: 1 + ServiceRole: !GetAtt CodeBuildRole.Arn + ResourceAccessRole: !GetAtt LogsAccessRole.Arn + LogsConfig: + CloudWatchLogs: + Status: ENABLED + GroupName: /aws/codebuild/aws-lambda-cpp-ci + Environment: + ImagePullCredentialsType: CODEBUILD + ComputeType: BUILD_GENERAL1_SMALL + Image: aws/codebuild/amazonlinux2-aarch64-standard:2.0 + Type: ARM_CONTAINER + PrivilegedMode: True + EnvironmentVariables: + - Name: ECR_NAME + Type: PLAINTEXT + Value: !Ref ECR + Source: + Type: GITHUB + Location: !Ref GitHub + BuildSpec: | + version: 0.2 + phases: + build: + commands: + - ./ci/update-images.sh + + UpdateX86BuildEnvironments: + Type: AWS::CodeBuild::Project + Properties: + Artifacts: + Type: NO_ARTIFACTS + BadgeEnabled: True + Visibility: PUBLIC_READ + ConcurrentBuildLimit: 1 + ServiceRole: !GetAtt CodeBuildRole.Arn + ResourceAccessRole: !GetAtt LogsAccessRole.Arn + LogsConfig: + CloudWatchLogs: + Status: ENABLED + GroupName: /aws/codebuild/aws-lambda-cpp-ci + Environment: + ImagePullCredentialsType: CODEBUILD + ComputeType: BUILD_GENERAL1_MEDIUM + Image: aws/codebuild/amazonlinux2-x86_64-standard:4.0 + Type: LINUX_CONTAINER + PrivilegedMode: True + EnvironmentVariables: + - Name: ECR_NAME + Type: PLAINTEXT + Value: !Ref ECR + Source: + Type: GITHUB + Location: !Ref GitHub + BuildSpec: | + version: 0.2 + phases: + build: + commands: + - ./ci/update-images.sh + + + Amazon2Arm: + Type: AWS::CodeBuild::Project + Properties: + Artifacts: + Type: NO_ARTIFACTS + BadgeEnabled: True + Visibility: PUBLIC_READ + ConcurrentBuildLimit: 1 + ServiceRole: !GetAtt CodeBuildRole.Arn + ResourceAccessRole: !GetAtt LogsAccessRole.Arn + LogsConfig: + CloudWatchLogs: + Status: ENABLED + GroupName: /aws/codebuild/aws-lambda-cpp-ci + Triggers: + BuildType: BUILD + Webhook: True + FilterGroups: + - - Type: EVENT + Pattern: PUSH,PULL_REQUEST_CREATED,PULL_REQUEST_UPDATED + Environment: + ImagePullCredentialsType: SERVICE_ROLE + ComputeType: BUILD_GENERAL1_SMALL + Type: ARM_CONTAINER + Image: !Join [ ':', [ !GetAtt ECR.RepositoryUri, amazon-linux-2-linux-arm64 ]] + EnvironmentVariables: + - Name: LAMBDA_TEST_ROLE + Type: PLAINTEXT + Value: !Ref LambdaTestRole + Source: + Type: GITHUB + Location: !Ref GitHub + BuildSpec: ci/codebuild/amazonlinux-2.yml + + Amazon2: + Type: AWS::CodeBuild::Project + Properties: + Artifacts: + Type: NO_ARTIFACTS + BadgeEnabled: True + Visibility: PUBLIC_READ + ConcurrentBuildLimit: 1 + ServiceRole: !GetAtt CodeBuildRole.Arn + ResourceAccessRole: !GetAtt LogsAccessRole.Arn + LogsConfig: + CloudWatchLogs: + Status: ENABLED + GroupName: /aws/codebuild/aws-lambda-cpp-ci + Triggers: + BuildType: BUILD + Webhook: True + FilterGroups: + - - Type: EVENT + Pattern: PUSH,PULL_REQUEST_CREATED,PULL_REQUEST_UPDATED + Environment: + ImagePullCredentialsType: SERVICE_ROLE + ComputeType: BUILD_GENERAL1_SMALL + Type: LINUX_CONTAINER + Image: !Join [ ':', [ !GetAtt ECR.RepositoryUri, amazon-linux-2-linux-amd64 ]] + EnvironmentVariables: + - Name: LAMBDA_TEST_ROLE + Type: PLAINTEXT + Value: !Ref LambdaTestRole + Source: + Type: GITHUB + Location: !Ref GitHub + BuildSpec: ci/codebuild/amazonlinux-2.yml + + Amazon201803: + Type: AWS::CodeBuild::Project + Properties: + Artifacts: + Type: NO_ARTIFACTS + BadgeEnabled: True + Visibility: PUBLIC_READ + ConcurrentBuildLimit: 1 + ServiceRole: !GetAtt CodeBuildRole.Arn + ResourceAccessRole: !GetAtt 
LogsAccessRole.Arn + LogsConfig: + CloudWatchLogs: + Status: ENABLED + GroupName: /aws/codebuild/aws-lambda-cpp-ci + Triggers: + BuildType: BUILD + Webhook: True + FilterGroups: + - - Type: EVENT + Pattern: PUSH,PULL_REQUEST_CREATED,PULL_REQUEST_UPDATED + Environment: + ImagePullCredentialsType: SERVICE_ROLE + ComputeType: BUILD_GENERAL1_SMALL + Type: LINUX_CONTAINER + Image: !Join [ ':', [ !GetAtt ECR.RepositoryUri, amazon-linux-2018.03-linux-amd64 ]] + EnvironmentVariables: + - Name: LAMBDA_TEST_ROLE + Type: PLAINTEXT + Value: !Ref LambdaTestRole + Source: + Type: GITHUB + Location: !Ref GitHub + BuildSpec: ci/codebuild/amazonlinux-2018.03.yml + + Ubuntu1804: + Type: AWS::CodeBuild::Project + Properties: + Artifacts: + Type: NO_ARTIFACTS + BadgeEnabled: True + Visibility: PUBLIC_READ + ConcurrentBuildLimit: 1 + ServiceRole: !GetAtt CodeBuildRole.Arn + ResourceAccessRole: !GetAtt LogsAccessRole.Arn + LogsConfig: + CloudWatchLogs: + Status: ENABLED + GroupName: /aws/codebuild/aws-lambda-cpp-ci + Triggers: + BuildType: BUILD + Webhook: True + FilterGroups: + - - Type: EVENT + Pattern: PUSH,PULL_REQUEST_CREATED,PULL_REQUEST_UPDATED + Environment: + ImagePullCredentialsType: SERVICE_ROLE + ComputeType: BUILD_GENERAL1_SMALL + Type: LINUX_CONTAINER + Image: !Join [ ':', [ !GetAtt ECR.RepositoryUri, ubuntu-linux-18.04-linux-amd64 ]] + EnvironmentVariables: + - Name: LAMBDA_TEST_ROLE + Type: PLAINTEXT + Value: !Ref LambdaTestRole + Source: + Type: GITHUB + Location: !Ref GitHub + BuildSpec: ci/codebuild/ubuntu-18.04.yml + + Alpine315: + Type: AWS::CodeBuild::Project + Properties: + Artifacts: + Type: NO_ARTIFACTS + BadgeEnabled: True + Visibility: PUBLIC_READ + ConcurrentBuildLimit: 1 + ServiceRole: !GetAtt CodeBuildRole.Arn + ResourceAccessRole: !GetAtt LogsAccessRole.Arn + LogsConfig: + CloudWatchLogs: + Status: ENABLED + GroupName: /aws/codebuild/aws-lambda-cpp-ci + Triggers: + BuildType: BUILD + Webhook: True + FilterGroups: + - - Type: EVENT + Pattern: PUSH,PULL_REQUEST_CREATED,PULL_REQUEST_UPDATED + Environment: + ImagePullCredentialsType: SERVICE_ROLE + ComputeType: BUILD_GENERAL1_SMALL + Type: LINUX_CONTAINER + Image: !Join [ ':', [ !GetAtt ECR.RepositoryUri, alpine-linux-3.15-linux-amd64 ]] + EnvironmentVariables: + - Name: LAMBDA_TEST_ROLE + Type: PLAINTEXT + Value: !Ref LambdaTestRole + Source: + Type: GITHUB + Location: !Ref GitHub + BuildSpec: ci/codebuild/alpine-3.15.yml + + Arch: + Type: AWS::CodeBuild::Project + Properties: + Artifacts: + Type: NO_ARTIFACTS + BadgeEnabled: True + Visibility: PUBLIC_READ + ConcurrentBuildLimit: 1 + ServiceRole: !GetAtt CodeBuildRole.Arn + ResourceAccessRole: !GetAtt LogsAccessRole.Arn + LogsConfig: + CloudWatchLogs: + Status: ENABLED + GroupName: /aws/codebuild/aws-lambda-cpp-ci + Triggers: + BuildType: BUILD + Webhook: True + FilterGroups: + - - Type: EVENT + Pattern: PUSH,PULL_REQUEST_CREATED,PULL_REQUEST_UPDATED + Environment: + ImagePullCredentialsType: SERVICE_ROLE + ComputeType: BUILD_GENERAL1_SMALL + Type: LINUX_CONTAINER + Image: !Join [ ':', [ !GetAtt ECR.RepositoryUri, arch-linux-linux-amd64 ]] + EnvironmentVariables: + - Name: LAMBDA_TEST_ROLE + Type: PLAINTEXT + Value: !Ref LambdaTestRole + Source: + Type: GITHUB + Location: !Ref GitHub + BuildSpec: ci/codebuild/arch-linux.yml + +Outputs: + BootstrapArmImages: + Description: to bootstrap or update the arm images, run the command! 
+ Value: !Join [' ', [ aws codebuild start-build --project-name, !Ref UpdateArmBuildEnvironments ] ] + BootstrapX86Images: + Description: to bootstrap or update the arm images, run the command! + Value: !Join [' ', [ aws codebuild start-build --project-name, !Ref UpdateX86BuildEnvironments ] ] diff --git a/ci/codebuild/alpine-3.15.yml b/ci/codebuild/alpine-3.15.yml new file mode 100644 index 0000000..616fed9 --- /dev/null +++ b/ci/codebuild/alpine-3.15.yml @@ -0,0 +1,9 @@ +version: 0.2 +# This uses the docker image specified in ci/docker/alpine-linux-3.15 +phases: + build: + commands: + - echo Build started on `date` + - ./ci/codebuild/build.sh -DTEST_RESOURCE_PREFIX=lambda-cpp-alpine315 + - ./ci/codebuild/run-tests.sh aws-lambda-package-lambda-test-fun + - echo Build completed on `date` diff --git a/ci/codebuild/amazonlinux-2.yml b/ci/codebuild/amazonlinux-2.yml new file mode 100644 index 0000000..e2f6d76 --- /dev/null +++ b/ci/codebuild/amazonlinux-2.yml @@ -0,0 +1,9 @@ +version: 0.2 +phases: + build: + commands: + - echo Build started on `date` + - ./ci/codebuild/build.sh -DTEST_RESOURCE_PREFIX=lambda-cpp-al2_$(arch) + - ./ci/codebuild/run-tests.sh aws-lambda-package-lambda-test-fun + - ./ci/codebuild/run-tests.sh aws-lambda-package-lambda-test-fun-no-glibc + - echo Build completed on `date` \ No newline at end of file diff --git a/ci/codebuild/amazonlinux-2017.03.yml b/ci/codebuild/amazonlinux-2017.03.yml deleted file mode 100644 index eab1baf..0000000 --- a/ci/codebuild/amazonlinux-2017.03.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: 0.1 -# This uses the docker image specified in ci/docker/amazon-linux-2017.03 -phases: - pre_build: - commands: - - alias cmake=cmake3 - - pip install awscli - - ci/codebuild/build-cpp-sdk.sh - build: - commands: - - echo Build started on `date` - - ci/codebuild/build.sh -DENABLE_TESTS=ON -DTEST_RESOURCE_PREFIX=amzn201703 - - ci/codebuild/run-tests.sh aws-lambda-package-lambda-test-fun amzn201703 - - ci/codebuild/run-tests.sh aws-lambda-package-lambda-test-fun-no-glibc amzn201703 - post_build: - commands: - - echo Build completed on `date` - diff --git a/ci/codebuild/amazonlinux-2018.03.yml b/ci/codebuild/amazonlinux-2018.03.yml new file mode 100644 index 0000000..d4d0b5c --- /dev/null +++ b/ci/codebuild/amazonlinux-2018.03.yml @@ -0,0 +1,12 @@ +version: 0.2 +# This uses the docker image specified in ci/docker/amazon-linux-2017.03 +phases: + build: + commands: + - echo Build started on `date` + - yum install -y binutils + - ./ci/codebuild/build.sh -DTEST_RESOURCE_PREFIX=lambda-cpp-amzn201703 + - ./ci/codebuild/run-tests.sh aws-lambda-package-lambda-test-fun + - ./ci/codebuild/run-tests.sh aws-lambda-package-lambda-test-fun-no-glibc + - echo Build completed on `date` + diff --git a/ci/codebuild/arch-linux.yml b/ci/codebuild/arch-linux.yml new file mode 100644 index 0000000..0a12a5d --- /dev/null +++ b/ci/codebuild/arch-linux.yml @@ -0,0 +1,9 @@ +version: 0.2 +# This uses the docker image specified in ci/docker/arch-linux +phases: + build: + commands: + - echo Build started on `date` + - ./ci/codebuild/build.sh -DTEST_RESOURCE_PREFIX=lambda-cpp-archbtw -DENABLE_SANITIZERS=ON + - ./ci/codebuild/run-tests.sh aws-lambda-package-lambda-test-fun + - echo Build completed on `date` diff --git a/ci/codebuild/build-cpp-sdk.sh b/ci/codebuild/build-cpp-sdk.sh index 93ae7eb..7f84aab 100755 --- a/ci/codebuild/build-cpp-sdk.sh +++ b/ci/codebuild/build-cpp-sdk.sh @@ -4,12 +4,10 @@ set -euo pipefail # build the AWS C++ SDK cd /aws-sdk-cpp -git pull mkdir build cd build 
cmake .. -GNinja -DBUILD_ONLY="lambda" \ -DCMAKE_BUILD_TYPE=Release \ - -DENABLE_UNITY_BUILD=ON \ -DBUILD_SHARED_LIBS=ON \ -DENABLE_TESTING=OFF \ -DCMAKE_INSTALL_PREFIX=/install $@ diff --git a/ci/codebuild/build.sh b/ci/codebuild/build.sh index 53a9544..8a90d30 100755 --- a/ci/codebuild/build.sh +++ b/ci/codebuild/build.sh @@ -6,6 +6,10 @@ set -euo pipefail cd $CODEBUILD_SRC_DIR mkdir build cd build -cmake .. -GNinja -DBUILD_SHARED_LIBS=ON -DCMAKE_BUILD_TYPE=Debug -DCMAKE_INSTALL_PREFIX=/install $@ +cmake .. -GNinja \ + -DBUILD_SHARED_LIBS=ON \ + -DCMAKE_BUILD_TYPE=Debug \ + -DENABLE_TESTS=ON \ + -DCMAKE_INSTALL_PREFIX=/install $@ ninja ninja install diff --git a/ci/codebuild/format-check.sh b/ci/codebuild/format-check.sh index 3afb802..035bd54 100755 --- a/ci/codebuild/format-check.sh +++ b/ci/codebuild/format-check.sh @@ -4,7 +4,7 @@ set -euo pipefail CLANG_FORMAT=clang-format -if NOT type $CLANG_FORMAT > /dev/null 2>&1; then +if ! type $CLANG_FORMAT > /dev/null 2>&1; then echo "No appropriate clang-format found." exit 1 fi @@ -13,6 +13,11 @@ FAIL=0 SOURCE_FILES=$(find src include tests -type f -name "*.h" -o -name "*.cpp") for i in $SOURCE_FILES do + if [[ "$i" == *"gtest.h" || "$i" == *"backward.h" ]]; then + continue + fi + + echo "$i\n" if [ $($CLANG_FORMAT -output-replacements-xml $i | grep -c ": format for secondary source" + echo " * For sourceIdentifier, use a value that is fewer than 128 characters and contains only alphanumeric characters and underscores" + echo " -c Use the AWS configuration and credentials from your local host. This includes ~/.aws and any AWS_* environment variables." + echo " -p Used to specify the AWS CLI Profile." + echo " -b FILE Used to specify a buildspec override file. Defaults to buildspec.yml in the source directory." + echo " -m Used to mount the source directory to the customer build container directly." + echo " -d Used to run the build container in docker privileged mode." + echo " -e FILE Used to specify a file containing environment variables." + echo " (-e) File format expectations:" + echo " * Each line is in VAR=VAL format" + echo " * Lines beginning with # are processed as comments and ignored" + echo " * Blank lines are ignored" + echo " * File can be of type .env or .txt" + echo " * There is no special handling of quotation marks, meaning they will be part of the VAL" + exit 1 +} + +image_flag=false +artifact_flag=false +awsconfig_flag=false +mount_src_dir_flag=false +docker_privileged_mode_flag=false + +while getopts "cmdi:a:r:s:b:e:l:p:h" opt; do + case $opt in + i ) image_flag=true; image_name=$OPTARG;; + a ) artifact_flag=true; artifact_dir=$OPTARG;; + r ) report_dir=$OPTARG;; + b ) buildspec=$OPTARG;; + c ) awsconfig_flag=true;; + m ) mount_src_dir_flag=true;; + d ) docker_privileged_mode_flag=true;; + s ) source_dirs+=("$OPTARG");; + e ) environment_variable_file=$OPTARG;; + l ) local_agent_image=$OPTARG;; + p ) aws_profile=$OPTARG;; + h ) usage; exit;; + \? ) echo "Unknown option: -$OPTARG" >&2; exit 1;; + : ) echo "Missing option argument for -$OPTARG" >&2; exit 1;; + * ) echo "Invalid option: -$OPTARG" >&2; exit 1;; + esac +done + +if ! $image_flag +then + echo "The image name flag (-i) must be included for a build to run" >&2 +fi + +if ! $artifact_flag +then + echo "The artifact directory (-a) must be included for a build to run" >&2 +fi + +if ! $image_flag || ! 
$artifact_flag +then + exit 1 +fi + +docker_command="docker run -it " +if isOSWindows +then + docker_command+="-v //var/run/docker.sock:/var/run/docker.sock -e " +else + docker_command+="-v /var/run/docker.sock:/var/run/docker.sock -e " +fi + +docker_command+="\"IMAGE_NAME=$image_name\" -e \ + \"ARTIFACTS=$(allOSRealPath "$artifact_dir")\"" + +if [ -n "$report_dir" ] +then + docker_command+=" -e \"REPORTS=$(allOSRealPath "$report_dir")\"" +fi + +if [ -z "$source_dirs" ] +then + docker_command+=" -e \"SOURCE=$(allOSRealPath "$PWD")\"" +else + for index in "${!source_dirs[@]}"; do + if [ $index -eq 0 ] + then + docker_command+=" -e \"SOURCE=$(allOSRealPath "${source_dirs[$index]}")\"" + else + identifier=${source_dirs[$index]%%:*} + src_dir=$(allOSRealPath "${source_dirs[$index]#*:}") + + docker_command+=" -e \"SECONDARY_SOURCE_$index=$identifier:$src_dir\"" + fi + done +fi + +if [ -n "$buildspec" ] +then + docker_command+=" -e \"BUILDSPEC=$(allOSRealPath "$buildspec")\"" +fi + +if [ -n "$environment_variable_file" ] +then + environment_variable_file_path=$(allOSRealPath "$environment_variable_file") + environment_variable_file_dir=$(dirname "$environment_variable_file_path") + environment_variable_file_basename=$(basename "$environment_variable_file") + docker_command+=" -v \"$environment_variable_file_dir:/LocalBuild/envFile/\" -e \"ENV_VAR_FILE=$environment_variable_file_basename\"" +fi + +if [ -n "$local_agent_image" ] +then + docker_command+=" -e \"LOCAL_AGENT_IMAGE_NAME=$local_agent_image\"" +fi + +if $awsconfig_flag +then + if [ -d "$HOME/.aws" ] + then + configuration_file_path=$(allOSRealPath "$HOME/.aws") + docker_command+=" -e \"AWS_CONFIGURATION=$configuration_file_path\"" + else + docker_command+=" -e \"AWS_CONFIGURATION=NONE\"" + fi + + if [ -n "$aws_profile" ] + then + docker_command+=" -e \"AWS_PROFILE=$aws_profile\"" + fi + + docker_command+="$(env | grep ^AWS_ | while read -r line; do echo " -e \"$line\""; done )" +fi + +if $mount_src_dir_flag +then + docker_command+=" -e \"MOUNT_SOURCE_DIRECTORY=TRUE\"" +fi + +if $docker_privileged_mode_flag +then + docker_command+=" -e \"DOCKER_PRIVILEGED_MODE=TRUE\"" +fi + +if isOSWindows +then + docker_command+=" -e \"INITIATOR=$USERNAME\"" +else + docker_command+=" -e \"INITIATOR=$USER\"" +fi + +if [ -n "$local_agent_image" ] +then + docker_command+=" $local_agent_image" +else + docker_command+=" public.ecr.aws/codebuild/local-builds:latest" +fi + +# Note we do not expose the AWS_SECRET_ACCESS_KEY or the AWS_SESSION_TOKEN +exposed_command=$docker_command +secure_variables=( "AWS_SECRET_ACCESS_KEY=" "AWS_SESSION_TOKEN=") +for variable in "${secure_variables[@]}" +do + exposed_command="$(echo $exposed_command | sed "s/\($variable\)[^ ]*/\1********\"/")" +done + +echo "Build Command:" +echo "" +echo $exposed_command +echo "" + +eval $docker_command diff --git a/ci/docker/alpine-linux-3.15 b/ci/docker/alpine-linux-3.15 new file mode 100644 index 0000000..c8d33b5 --- /dev/null +++ b/ci/docker/alpine-linux-3.15 @@ -0,0 +1,19 @@ +FROM public.ecr.aws/docker/library/alpine:3.15 + +RUN apk add --no-cache \ + bash \ + cmake \ + curl-dev \ + g++ \ + git \ + libexecinfo-dev \ + ninja \ + openssl-libs-static \ + zlib-dev \ + zip + +RUN git clone --recurse-submodules https://github.com/aws/aws-sdk-cpp.git +RUN cmake -Saws-sdk-cpp -Baws-sdk-cpp/build -GNinja \ + -DBUILD_ONLY=lambda \ + -DENABLE_TESTING=OFF +RUN cd aws-sdk-cpp/build && ninja && ninja install diff --git a/ci/docker/alpine-linux-3.8 b/ci/docker/alpine-linux-3.8 deleted file mode 100644 
index a042e4c..0000000
--- a/ci/docker/alpine-linux-3.8
+++ /dev/null
@@ -1,5 +0,0 @@
-FROM alpine:latest
-
-RUN apk update; apk add g++ cmake git ninja curl-dev zlib-dev
-
-RUN git clone https://github.com/aws/aws-sdk-cpp.git
diff --git a/ci/docker/amazon-linux-2 b/ci/docker/amazon-linux-2
new file mode 100644
index 0000000..452d1f4
--- /dev/null
+++ b/ci/docker/amazon-linux-2
@@ -0,0 +1,16 @@
+FROM public.ecr.aws/amazonlinux/amazonlinux:2
+RUN yum install -y \
+    cmake3 \
+    ninja-build \
+    git \
+    gcc-c++ \
+    openssl-devel \
+    curl-devel \
+    openssl-static \
+    zip
+RUN git clone https://github.com/aws/aws-sdk-cpp --recurse-submodules
+RUN cmake3 -Saws-sdk-cpp -Baws-sdk-cpp/build -DBUILD_ONLY=lambda -DENABLE_TESTING=OFF -GNinja
+RUN cd aws-sdk-cpp/build && ninja-build && ninja-build install
+RUN ln -s /usr/bin/cmake3 /usr/local/bin/cmake
+RUN ln -s /usr/bin/ctest3 /usr/local/bin/ctest
+RUN ln -s /usr/bin/ninja-build /usr/local/bin/ninja
diff --git a/ci/docker/amazon-linux-2017.03 b/ci/docker/amazon-linux-2017.03
deleted file mode 100644
index f66919a..0000000
--- a/ci/docker/amazon-linux-2017.03
+++ /dev/null
@@ -1,12 +0,0 @@
-FROM amazonlinux:2017.03
-
-RUN yum install gcc64-c++ git ninja-build curl-devel openssl-devel zlib-devel gtest-devel python36-pip zip -y
-RUN git clone https://github.com/aws/aws-sdk-cpp.git
-
-RUN curl -fLo cmake-install https://github.com/Kitware/CMake/releases/download/v3.13.0/cmake-3.13.0-Linux-x86_64.sh; \
-sh cmake-install --skip-license --prefix=/usr --exclude-subdirectory;
-
-RUN pip-3.6 install --upgrade pip
-
-RUN git clone https://github.com/aws/aws-sdk-cpp.git
-
diff --git a/ci/docker/amazon-linux-2018.03 b/ci/docker/amazon-linux-2018.03
new file mode 100644
index 0000000..142dec2
--- /dev/null
+++ b/ci/docker/amazon-linux-2018.03
@@ -0,0 +1,18 @@
+FROM public.ecr.aws/amazonlinux/amazonlinux:2018.03
+
+RUN yum install -y \
+    gcc-c++ \
+    git \
+    ninja-build \
+    curl-devel \
+    openssl-devel \
+    openssl-static \
+    zlib-devel \
+    gtest-devel \
+    zip
+RUN curl -fLo cmake-install https://github.com/Kitware/CMake/releases/download/v3.13.0/cmake-3.13.0-Linux-x86_64.sh && \
+    sh cmake-install --skip-license --prefix=/usr --exclude-subdirectory;
+RUN git clone --recurse-submodules https://github.com/aws/aws-sdk-cpp
+RUN cmake -Saws-sdk-cpp -Baws-sdk-cpp/build -DBUILD_ONLY=lambda -DENABLE_TESTING=OFF -GNinja
+RUN cd aws-sdk-cpp/build && ninja-build && ninja-build install
+RUN ln -s /usr/bin/ninja-build /usr/local/bin/ninja
diff --git a/ci/docker/arch-linux b/ci/docker/arch-linux
new file mode 100644
index 0000000..dce1a50
--- /dev/null
+++ b/ci/docker/arch-linux
@@ -0,0 +1,22 @@
+FROM public.ecr.aws/docker/library/archlinux:latest
+
+RUN pacman -Sy --noconfirm git
+RUN git clone --recurse-submodules https://github.com/aws/aws-sdk-cpp.git
+RUN pacman -Sy --noconfirm \
+    cmake \
+    ninja \
+    clang \
+    curl \
+    zip
+
+
+# Note: (2022-08-23)
+# Using -DUSE_OPENSSL=OFF as a workaround to an AWS SDK dependency issue with this distro.
+# The current SDK version has a dependency on a static build version of openssl, not available through pacman.
+# ref: https://github.com/aws/aws-sdk-cpp/issues/1910 +RUN CC=/usr/bin/clang CXX=/usr/bin/clang++ cmake -Saws-sdk-cpp -Baws-sdk-cpp/build -GNinja \ + -DBUILD_ONLY=lambda \ + -DUSE_OPENSSL=OFF \ + -DENABLE_TESTING=OFF +RUN cmake --build aws-sdk-cpp/build -t install + diff --git a/ci/docker/ubuntu-linux-18.04 b/ci/docker/ubuntu-linux-18.04 index 175ea82..3730f57 100644 --- a/ci/docker/ubuntu-linux-18.04 +++ b/ci/docker/ubuntu-linux-18.04 @@ -1,15 +1,22 @@ -FROM ubuntu:18.04 +FROM public.ecr.aws/ubuntu/ubuntu:18.04 -RUN apt-get update; apt-get install git clang clang-tidy clang-format zlib1g-dev libssl-dev libcurl4-openssl-dev wget \ -ninja-build python3-pip zip -y +RUN apt-get update +RUN apt-get install -y \ + git \ + clang \ + zlib1g-dev \ + libssl-dev \ + libcurl4-openssl-dev \ + wget \ + ninja-build \ + zip - -RUN wget -O cmake-install https://github.com/Kitware/CMake/releases/download/v3.13.0/cmake-3.13.0-Linux-x86_64.sh; \ -sh cmake-install --skip-license --prefix=/usr --exclude-subdirectory; - -RUN pip3 install --upgrade pip - -RUN git clone https://github.com/aws/aws-sdk-cpp.git +RUN wget -O cmake-install https://github.com/Kitware/CMake/releases/download/v3.13.0/cmake-3.13.0-Linux-x86_64.sh && \ + sh cmake-install --skip-license --prefix=/usr --exclude-subdirectory; RUN update-alternatives --set cc /usr/bin/clang RUN update-alternatives --set c++ /usr/bin/clang++ + +RUN git clone https://github.com/aws/aws-sdk-cpp.git --recurse-submodules +RUN cmake -Saws-sdk-cpp -Baws-sdk-cpp/build -DBUILD_ONLY=lambda -DENABLE_TESTING=OFF -GNinja +RUN cd aws-sdk-cpp/build && ninja && ninja install diff --git a/ci/update-images.sh b/ci/update-images.sh new file mode 100755 index 0000000..ffff6e5 --- /dev/null +++ b/ci/update-images.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +set -euo pipefail + +PRJ_ROOT=$(git rev-parse --show-toplevel) +ECR_NAME=${ECR_NAME:-aws-lambda-cpp-ci} +REGION=${AWS_DEFAULT_REGION:-us-west-2} +ACCOUNT_ID=$(aws sts get-caller-identity --output text --query "Account") + +aws ecr get-login-password --region $REGION | docker login --username AWS --password-stdin $ACCOUNT_ID.dkr.ecr.$REGION.amazonaws.com + +# on Linux, if buildx is giving trouble - run: +# docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + +build-and-push () { + TAG=$ACCOUNT_ID.dkr.ecr.$REGION.amazonaws.com/$ECR_NAME:$1-$(echo $2 | sed 's|/|-|g') + docker build --platform $2 -t $TAG -f "$PRJ_ROOT/ci/docker/$1" . + docker push $TAG +} + +if [[ $(arch) == "aarch64" ]]; then + build-and-push amazon-linux-2 linux/arm64 +else + build-and-push ubuntu-linux-18.04 linux/amd64 + build-and-push alpine-linux-3.15 linux/amd64 + build-and-push amazon-linux-2018.03 linux/amd64 + build-and-push amazon-linux-2 linux/amd64 + build-and-push arch-linux linux/amd64 +fi diff --git a/examples/Dockerfile b/examples/Dockerfile index c7af1ec..1aabd59 100644 --- a/examples/Dockerfile +++ b/examples/Dockerfile @@ -1,3 +1,3 @@ FROM alpine:latest -RUN apk add --no-cache cmake make g++ git bash curl-dev zlib-dev libexecinfo-dev +RUN apk add --no-cache cmake make g++ git bash zip curl-dev zlib-dev libexecinfo-dev diff --git a/examples/api-gateway/README.md b/examples/api-gateway/README.md index d184165..720f875 100644 --- a/examples/api-gateway/README.md +++ b/examples/api-gateway/README.md @@ -14,10 +14,8 @@ $ cd build $ cmake .. 
-DBUILD_ONLY="core" \ -DCMAKE_BUILD_TYPE=Release \ -DBUILD_SHARED_LIBS=OFF \ - -DENABLE_UNITY_BUILD=ON \ -DCUSTOM_MEMORY_MANAGEMENT=OFF \ - -DCMAKE_INSTALL_PREFIX=~/install \ - -DENABLE_UNITY_BUILD=ON + -DCMAKE_INSTALL_PREFIX=~/install $ make $ make install ``` diff --git a/examples/demo/CMakeLists.txt b/examples/demo/CMakeLists.txt new file mode 100644 index 0000000..06aad51 --- /dev/null +++ b/examples/demo/CMakeLists.txt @@ -0,0 +1,11 @@ +cmake_minimum_required(VERSION 3.9) +set(CMAKE_CXX_STANDARD 11) +project(demo LANGUAGES CXX) +find_package(aws-lambda-runtime) +add_executable(${PROJECT_NAME} "main.cpp") +target_link_libraries(${PROJECT_NAME} PRIVATE AWS::aws-lambda-runtime) +target_compile_features(${PROJECT_NAME} PRIVATE "cxx_std_11") +target_compile_options(${PROJECT_NAME} PRIVATE "-Wall" "-Wextra") + +# this line creates a target that packages your binary and zips it up +aws_lambda_package_target(${PROJECT_NAME}) diff --git a/examples/demo/main.cpp b/examples/demo/main.cpp new file mode 100644 index 0000000..5381311 --- /dev/null +++ b/examples/demo/main.cpp @@ -0,0 +1,20 @@ +#include + +using namespace aws::lambda_runtime; + +static invocation_response my_handler(invocation_request const& req) +{ + if (req.payload.length() > 42) { + return invocation_response::failure("error message here"/*error_message*/, + "error type here" /*error_type*/); + } + + return invocation_response::success("{\"message:\":\"I fail if body length is bigger than 42!\"}" /*payload*/, + "application/json" /*MIME type*/); +} + +int main() +{ + run_handler(my_handler); + return 0; +} diff --git a/examples/dynamodb/README.md b/examples/dynamodb/README.md index db84fd8..ebd6316 100644 --- a/examples/dynamodb/README.md +++ b/examples/dynamodb/README.md @@ -16,10 +16,8 @@ $ cd build $ cmake .. -DBUILD_ONLY="dynamodb" \ -DCMAKE_BUILD_TYPE=Release \ -DBUILD_SHARED_LIBS=OFF \ - -DENABLE_UNITY_BUILD=ON \ -DCUSTOM_MEMORY_MANAGEMENT=OFF \ - -DCMAKE_INSTALL_PREFIX=~/install \ - -DENABLE_UNITY_BUILD=ON + -DCMAKE_INSTALL_PREFIX=~/install $ make -j 4 $ make install diff --git a/examples/dynamodb/main.cpp b/examples/dynamodb/main.cpp index a8b8662..6089fa3 100644 --- a/examples/dynamodb/main.cpp +++ b/examples/dynamodb/main.cpp @@ -173,9 +173,9 @@ aws::lambda_runtime::invocation_response my_handler( if (cr.error_msg) { JsonValue response; response.WithString("body", cr.error_msg).WithInteger("statusCode", 400); - auto const apig_response = response.View().WriteCompact(); + auto apig_response = response.View().WriteCompact(); AWS_LOGSTREAM_ERROR(TAG, "Validation failed. 
" << apig_response); - return aws::lambda_runtime::invocation_response::success(apig_response, "application/json"); + return aws::lambda_runtime::invocation_response::success(std::move(apig_response), "application/json"); } auto result = query(cr, client); @@ -190,10 +190,10 @@ aws::lambda_runtime::invocation_response my_handler( response.WithString("body", "No data found for this product.").WithInteger("statusCode", 400); } - auto const apig_response = response.View().WriteCompact(); + auto apig_response = response.View().WriteCompact(); AWS_LOGSTREAM_DEBUG(TAG, "api gateway response: " << apig_response); - return aws::lambda_runtime::invocation_response::success(apig_response, "application/json"); + return aws::lambda_runtime::invocation_response::success(std::move(apig_response), "application/json"); } std::function()> GetConsoleLoggerFactory() diff --git a/examples/s3/README.md b/examples/s3/README.md index 8bc3255..efc0c77 100644 --- a/examples/s3/README.md +++ b/examples/s3/README.md @@ -17,8 +17,7 @@ $ cmake .. -DBUILD_ONLY="s3" \ -DCMAKE_BUILD_TYPE=Release \ -DBUILD_SHARED_LIBS=OFF \ -DCUSTOM_MEMORY_MANAGEMENT=OFF \ - -DCMAKE_INSTALL_PREFIX=~/install \ - -DENABLE_UNITY_BUILD=ON + -DCMAKE_INSTALL_PREFIX=~/install $ make $ make install @@ -34,7 +33,7 @@ $ mkdir build $ cd build $ cmake .. -DCMAKE_BUILD_TYPE=Release \ -DBUILD_SHARED_LIBS=OFF \ - -DCMAKE_INSTALL_PREFIX=~/install \ + -DCMAKE_INSTALL_PREFIX=~/install $ make $ make install ``` diff --git a/examples/s3/main.cpp b/examples/s3/main.cpp index 45b935b..892560c 100644 --- a/examples/s3/main.cpp +++ b/examples/s3/main.cpp @@ -50,7 +50,7 @@ static invocation_response my_handler(invocation_request const& req, Aws::S3::S3 return invocation_response::failure(err, "DownloadFailure"); } - return invocation_response::success(base64_encoded_file, "application/base64"); + return invocation_response::success(std::move(base64_encoded_file), "application/base64"); } std::function()> GetConsoleLoggerFactory() @@ -73,8 +73,7 @@ int main() config.region = Aws::Environment::GetEnv("AWS_REGION"); config.caFile = "/etc/pki/tls/certs/ca-bundle.crt"; - auto credentialsProvider = Aws::MakeShared(TAG); - S3::S3Client client(credentialsProvider, config); + S3::S3Client client(config); auto handler_fn = [&client](aws::lambda_runtime::invocation_request const& req) { return my_handler(req, client); }; diff --git a/include/aws/http/response.h b/include/aws/http/response.h index 9b8cbda..6ef0c20 100644 --- a/include/aws/http/response.h +++ b/include/aws/http/response.h @@ -14,6 +14,8 @@ * permissions and limitations under the License. 
*/ +#include "aws/lambda-runtime/outcome.h" + #include #include #include @@ -31,7 +33,7 @@ class response { inline void add_header(std::string name, std::string const& value); inline void append_body(const char* p, size_t sz); inline bool has_header(char const* header) const; - inline std::string const& get_header(char const* header) const; + inline lambda_runtime::outcome get_header(char const* header) const; inline response_code get_response_code() const { return m_response_code; } inline void set_response_code(aws::http::response_code c); inline void set_content_type(char const* ct); @@ -114,7 +116,7 @@ enum class response_code { GATEWAY_TIMEOUT = 504, HTTP_VERSION_NOT_SUPPORTED = 505, VARIANT_ALSO_NEGOTIATES = 506, - INSUFFICIENT_STORAGE = 506, + INSUFFICIENT_STORAGE = 507, LOOP_DETECTED = 508, BANDWIDTH_LIMIT_EXCEEDED = 509, NOT_EXTENDED = 510, @@ -140,7 +142,7 @@ inline std::string const& response::get_body() const inline void response::add_header(std::string name, std::string const& value) { std::transform(name.begin(), name.end(), name.begin(), ::tolower); - m_headers.emplace_back(name, value); + m_headers.emplace_back(std::move(name), value); } inline void response::append_body(const char* p, size_t sz) @@ -161,12 +163,15 @@ inline bool response::has_header(char const* header) const }); } -inline std::string const& response::get_header(char const* header) const +inline lambda_runtime::outcome response::get_header(char const* header) const { auto it = std::find_if(m_headers.begin(), m_headers.end(), [header](std::pair const& p) { return p.first == header; }); - assert(it != m_headers.end()); + + if (it == m_headers.end()) { + return false; + } return it->second; } diff --git a/include/aws/lambda-runtime/outcome.h b/include/aws/lambda-runtime/outcome.h index b5d0b8b..9982f5d 100644 --- a/include/aws/lambda-runtime/outcome.h +++ b/include/aws/lambda-runtime/outcome.h @@ -49,14 +49,20 @@ class outcome { } } - ~outcome() + ~outcome() { destroy(); } + + outcome& operator=(outcome&& other) noexcept { - if (m_success) { - m_s.~TResult(); + assert(this != &other); + destroy(); + if (other.m_success) { + new (&m_s) TResult(std::move(other.m_s)); } else { - m_f.~TFailure(); + new (&m_f) TFailure(std::move(other.m_f)); } + m_success = other.m_success; + return *this; } TResult const& get_result() const& @@ -86,6 +92,16 @@ class outcome { bool is_success() const { return m_success; } private: + void destroy() + { + if (m_success) { + m_s.~TResult(); + } + else { + m_f.~TFailure(); + } + } + union { TResult m_s; TFailure m_f; diff --git a/include/aws/lambda-runtime/runtime.h b/include/aws/lambda-runtime/runtime.h index 94e1e22..46b8817 100644 --- a/include/aws/lambda-runtime/runtime.h +++ b/include/aws/lambda-runtime/runtime.h @@ -105,7 +105,7 @@ class invocation_response { /** * Create a successful invocation response with the given payload and content-type. */ - static invocation_response success(std::string const& payload, std::string const& content_type); + static invocation_response success(std::string payload, std::string content_type); /** * Create a failure response with the given error message and error type. 
@@ -129,8 +129,7 @@ class invocation_response { bool is_success() const { return m_success; } }; -struct no_result { -}; +struct no_result {}; class runtime { public: @@ -163,8 +162,6 @@ class runtime { std::string const& url, std::string const& request_id, invocation_response const& handler_response); - -private: std::string const m_user_agent_header; std::array const m_endpoints; CURL* const m_curl_handle; diff --git a/packaging/packager b/packaging/packager index ab27526..c050696 100755 --- a/packaging/packager +++ b/packaging/packager @@ -45,7 +45,6 @@ done set -- "${POSITIONAL[@]}" # restore positional parameters PKG_BIN_PATH=$1 -architecture=$(arch) if [ ! -f "$PKG_BIN_PATH" ]; then echo "$PKG_BIN_PATH" - No such file.; @@ -56,26 +55,43 @@ if ! type zip > /dev/null 2>&1; then echo "zip utility is not found. Please install it and re-run this script" exit 1 fi -function package_libc_via_pacman { + +function pluck_so_files() { + sed -E '/\.so$|\.so\.[0-9]+$/!d' +} + +function package_libc_alpine() { + # -F matches a fixed string rather than a regex (grep that comes with busybox doesn't know --fixed-strings) + if grep -F "Alpine Linux" < /etc/os-release > /dev/null; then + if type apk > /dev/null 2>&1; then + apk info --contents musl 2>/dev/null | pluck_so_files | sed 's/^/\//' + fi + fi +} + +function package_libc_pacman() { if grep --extended-regexp "Arch Linux|Manjaro Linux" < /etc/os-release > /dev/null 2>&1; then if type pacman > /dev/null 2>&1; then - pacman --query --list --quiet glibc | sed -E '/\.so$|\.so\.[0-9]+$/!d' + pacman --query --list --quiet glibc | pluck_so_files fi fi } -function package_libc_via_dpkg() { +function package_libc_dpkg() { if type dpkg-query > /dev/null 2>&1; then - if [[ $(dpkg-query --listfiles libc6 | wc -l) -gt 0 ]]; then - dpkg-query --listfiles libc6 | sed -E '/\.so$|\.so\.[0-9]+$/!d' + architecture=$(dpkg --print-architecture) + if [[ $(dpkg-query --listfiles libc6:$architecture | wc -l) -gt 0 ]]; then + dpkg-query --listfiles libc6:$architecture | pluck_so_files fi fi } -function package_libc_via_rpm() { +function package_libc_rpm() { + arch=$(uname -m) + if type rpm > /dev/null 2>&1; then - if [[ $(rpm --query --list glibc.$architecture | wc -l) -gt 1 ]]; then - rpm --query --list glibc.$architecture | sed -E '/\.so$|\.so\.[0-9]+$/!d' + if [[ $(rpm --query --list glibc.$arch | wc -l) -gt 1 ]]; then + rpm --query --list glibc.$arch | pluck_so_files fi fi } @@ -99,9 +115,10 @@ PKG_LD="" list=$(ldd "$PKG_BIN_PATH" | awk '{print $(NF-1)}') libc_libs=() -libc_libs+=($(package_libc_via_dpkg)) -libc_libs+=($(package_libc_via_rpm)) -libc_libs+=($(package_libc_via_pacman)) +libc_libs+=($(package_libc_dpkg)) +libc_libs+=($(package_libc_rpm)) +libc_libs+=($(package_libc_pacman)) +libc_libs+=($(package_libc_alpine)) mkdir -p "$PKG_DIR/bin" "$PKG_DIR/lib" @@ -154,20 +171,11 @@ exec \$LAMBDA_TASK_ROOT/lib/$PKG_LD --library-path \$LAMBDA_TASK_ROOT/lib \$LAMB EOF ) -bootstrap_script_no_libc=$(cat < "$PKG_DIR/bootstrap" else - echo -e "$bootstrap_script_no_libc" > "$PKG_DIR/bootstrap" + cp "$PKG_BIN_PATH" "$PKG_DIR/bootstrap" fi chmod +x "$PKG_DIR/bootstrap" # some shenanigans to create the right layout in the zip file without extraneous directories diff --git a/src/backward.cpp b/src/backward.cpp index cc64abd..8649fd5 100644 --- a/src/backward.cpp +++ b/src/backward.cpp @@ -23,10 +23,14 @@ // - g++/clang++ -lbfd ... 
// #define BACKWARD_HAS_BFD 1 -#include "backward.h" +#ifndef no_backtrace + +# include "backward.h" namespace backward { backward::SignalHandling sh; } // namespace backward + +#endif diff --git a/src/backward.h b/src/backward.h index 5128410..832c452 100644 --- a/src/backward.h +++ b/src/backward.h @@ -1,4 +1,3 @@ -// clang-format off /* * backward.hpp * Copyright 2013 Google Inc. All Rights Reserved. @@ -29,6 +28,10 @@ # error "It's not going to compile without a C++ compiler..." #endif +#ifdef no_backtrace +# pragma message "Disabling stacktracing" +#else + #if defined(BACKWARD_CXX11) #elif defined(BACKWARD_CXX98) #else @@ -36,6 +39,9 @@ # define BACKWARD_CXX11 # define BACKWARD_ATLEAST_CXX11 # define BACKWARD_ATLEAST_CXX98 +# if __cplusplus >= 201703L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) +# define BACKWARD_ATLEAST_CXX17 +# endif # else # define BACKWARD_CXX98 # define BACKWARD_ATLEAST_CXX98 @@ -50,6 +56,9 @@ // #define BACKWARD_SYSTEM_DARWIN // - specialization for Mac OS X 10.5 and later. // +// #define BACKWARD_SYSTEM_WINDOWS +// - specialization for Windows (Clang 9 and MSVC2017) +// // #define BACKWARD_SYSTEM_UNKNOWN // - placebo implementation, does nothing. // @@ -85,6 +94,8 @@ #include #include #include +#include +#include #if defined(BACKWARD_SYSTEM_LINUX) @@ -98,6 +109,11 @@ // exception. // - normally libgcc is already linked to your program by default. // +// #define BACKWARD_HAS_LIBUNWIND 1 +// - libunwind provides, in some cases, a more accurate stacktrace as it knows +// to decode signal handler frames and lets us edit the context registers when +// unwinding, allowing stack traces over bad function references. +// // #define BACKWARD_HAS_BACKTRACE == 1 // - backtrace seems to be a little bit more portable than libunwind, but on // linux, it uses unwind anyway, but abstract away a tiny information that is @@ -110,10 +126,13 @@ // Note that only one of the define should be set to 1 at a time. // # if BACKWARD_HAS_UNWIND == 1 +# elif BACKWARD_HAS_LIBUNWIND == 1 # elif BACKWARD_HAS_BACKTRACE == 1 # else # undef BACKWARD_HAS_UNWIND # define BACKWARD_HAS_UNWIND 1 +# undef BACKWARD_HAS_LIBUNWIND +# define BACKWARD_HAS_LIBUNWIND 0 # undef BACKWARD_HAS_BACKTRACE # define BACKWARD_HAS_BACKTRACE 0 # endif @@ -126,9 +145,10 @@ // - object filename // - function name // - source filename -// - line and column numbers -// - source code snippet (assuming the file is accessible) -// - variables name and values (if not optimized out) +// - line and column numbers +// - source code snippet (assuming the file is accessible) +// - variable names (if not optimized out) +// - variable values (not supported by backward-cpp) // - You need to link with the lib "dw": // - apt-get install libdw-dev // - g++/clang++ -ldw ... @@ -138,8 +158,8 @@ // - object filename // - function name // - source filename -// - line numbers -// - source code snippet (assuming the file is accessible) +// - line numbers +// - source code snippet (assuming the file is accessible) // - You need to link with the lib "bfd": // - apt-get install binutils-dev // - g++/clang++ -lbfd ... @@ -151,7 +171,8 @@ // - source filename // - line and column numbers // - source code snippet (assuming the file is accessible) -// - variables name and values (if not optimized out) +// - variable names (if not optimized out) +// - variable values (not supported by backward-cpp) // - You need to link with the lib "dwarf": // - apt-get install libdwarf-dev // - g++/clang++ -ldwarf ... 
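Taken together, the backward.cpp/backward.h changes expose three build-time knobs: define no_backtrace to compile the tracer out entirely, or define BACKWARD_HAS_DW=1 / BACKWARD_HAS_BFD=1 (and link libdw / libbfd, as the root CMakeLists now does) for symbolized traces. A throwaway probe assuming those macro names:

#include <cstdio>

// Build e.g. with: -Dno_backtrace, or -DBACKWARD_HAS_DW=1 -ldw, or -DBACKWARD_HAS_BFD=1 -lbfd.
int main()
{
#if defined(no_backtrace)
    std::puts("stack traces compiled out");
#elif defined(BACKWARD_HAS_DW) && BACKWARD_HAS_DW == 1
    std::puts("enhanced stack traces via libdw");
#elif defined(BACKWARD_HAS_BFD) && BACKWARD_HAS_BFD == 1
    std::puts("enhanced stack traces via libbfd");
#else
    std::puts("basic stack traces (default unwinder)");
#endif
    return 0;
}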
@@ -240,13 +261,10 @@ # endif # endif -# if BACKWARD_HAS_BACKTRACE_SYMBOL == 1 -# include -# endif - # if (BACKWARD_HAS_BACKTRACE == 1) || (BACKWARD_HAS_BACKTRACE_SYMBOL == 1) // then we shall rely on backtrace # include +# include # endif #endif // defined(BACKWARD_SYSTEM_LINUX) @@ -258,10 +276,16 @@ // #define BACKWARD_HAS_UNWIND 1 // - unwind comes from libgcc, but I saw an equivalent inside clang itself. // - with unwind, the stacktrace is as accurate as it can possibly be, since -// this is used by the C++ runtime in gcc/clang for stack unwinding on +// this is used by the C++ runtine in gcc/clang for stack unwinding on // exception. // - normally libgcc is already linked to your program by default. // +// #define BACKWARD_HAS_LIBUNWIND 1 +// - libunwind comes from clang, which implements an API compatible version. +// - libunwind provides, in some cases, a more accurate stacktrace as it knows +// to decode signal handler frames and lets us edit the context registers when +// unwinding, allowing stack traces over bad function references. +// // #define BACKWARD_HAS_BACKTRACE == 1 // - backtrace is available by default, though it does not produce as much // information as another library might. @@ -273,11 +297,14 @@ // # if BACKWARD_HAS_UNWIND == 1 # elif BACKWARD_HAS_BACKTRACE == 1 +# elif BACKWARD_HAS_LIBUNWIND == 1 # else # undef BACKWARD_HAS_UNWIND # define BACKWARD_HAS_UNWIND 1 # undef BACKWARD_HAS_BACKTRACE # define BACKWARD_HAS_BACKTRACE 0 +# undef BACKWARD_HAS_LIBUNWIND +# define BACKWARD_HAS_LIBUNWIND 0 # endif // On Darwin, backward can extract detailed information about a stack trace @@ -296,7 +323,6 @@ # undef BACKWARD_HAS_BACKTRACE_SYMBOL # define BACKWARD_HAS_BACKTRACE_SYMBOL 1 # endif - # include # include # include @@ -315,14 +341,16 @@ # include # include -# include +# include typedef SSIZE_T ssize_t; -# define NOMINMAX -# include +# ifndef NOMINMAX +# define NOMINMAX +# endif +# include # include -# include +# include # include # ifndef __clang__ @@ -330,8 +358,10 @@ typedef SSIZE_T ssize_t; # define NOINLINE __declspec(noinline) # endif -# pragma comment(lib, "psapi.lib") -# pragma comment(lib, "dbghelp.lib") +# ifdef _MSC_VER +# pragma comment(lib, "psapi.lib") +# pragma comment(lib, "dbghelp.lib") +# endif // Comment / packing is from stackoverflow: // https://stackoverflow.com/questions/6205981/windows-c-stack-trace-from-a-running-app/28276227#28276227 @@ -376,6 +406,11 @@ extern "C" uintptr_t _Unwind_GetIPInfo(_Unwind_Context*, int*); #endif // BACKWARD_HAS_UNWIND == 1 +#if BACKWARD_HAS_LIBUNWIND == 1 +# define UNW_LOCAL_ONLY +# include +#endif // BACKWARD_HAS_LIBUNWIND == 1 + #ifdef BACKWARD_ATLEAST_CXX11 # include # include // for std::swap @@ -567,7 +602,7 @@ class handle { void update(T new_val) { _val = new_val; - _empty = static_cast(new_val); + _empty = !static_cast(new_val); } operator const dummy*() const @@ -728,14 +763,14 @@ class StackTraceImpl { size_t size() const { return 0; } Trace operator[](size_t) const { return Trace(); } size_t load_here(size_t = 0) { return 0; } - size_t load_from(void*, size_t = 0) { return 0; } + size_t load_from(void*, size_t = 0, void* = nullptr, void* = nullptr) { return 0; } size_t thread_id() const { return 0; } void skip_n_firsts(size_t) {} }; class StackTraceImplBase { public: - StackTraceImplBase() : _thread_id(0), _skip(0) {} + StackTraceImplBase() : _thread_id(0), _skip(0), _context(nullptr), _error_addr(nullptr) {} size_t thread_id() const { return _thread_id; } @@ -764,16 +799,24 @@ class StackTraceImplBase { 
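The details::handle::update fix above flips the emptiness flag: previously a non-null value marked the handle as empty. A stand-alone analogue of the intended invariant, not the library class itself:

#include <cassert>
#include <cstdlib>

// Hypothetical mirror of backward's details::handle, reduced to the fixed invariant:
// _empty is true exactly when the stored value is falsy.
template <typename T>
struct tiny_handle {
    T    _val{};
    bool _empty = true;

    void update(T new_val)
    {
        _val   = new_val;
        _empty = !static_cast<bool>(new_val); // the "!" is what this patch adds
    }
};

int main()
{
    tiny_handle<void*> h;
    void* p = std::malloc(16);
    h.update(p);
    assert(!h._empty); // non-null => not empty
    h.update(nullptr);
    assert(h._empty);  // null => empty again
    std::free(p);
    return 0;
}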
#endif } + void set_context(void* context) { _context = context; } + void* context() const { return _context; } + + void set_error_addr(void* error_addr) { _error_addr = error_addr; } + void* error_addr() const { return _error_addr; } + size_t skip_n_firsts() const { return _skip; } private: size_t _thread_id; size_t _skip; + void* _context; + void* _error_addr; }; class StackTraceImplHolder : public StackTraceImplBase { public: - size_t size() const { return _stacktrace.size() ? _stacktrace.size() - skip_n_firsts() : 0; } + size_t size() const { return (_stacktrace.size() >= skip_n_firsts()) ? _stacktrace.size() - skip_n_firsts() : 0; } Trace operator[](size_t idx) const { if (idx >= size()) { @@ -861,9 +904,11 @@ template <> class StackTraceImpl : public StackTraceImplHolder { public: NOINLINE - size_t load_here(size_t depth = 32) + size_t load_here(size_t depth = 32, void* context = nullptr, void* error_addr = nullptr) { load_thread_info(); + set_context(context); + set_error_addr(error_addr); if (depth == 0) { return 0; } @@ -873,9 +918,9 @@ class StackTraceImpl : public StackTraceImplHolder { skip_n_firsts(0); return size(); } - size_t load_from(void* addr, size_t depth = 32) + size_t load_from(void* addr, size_t depth = 32, void* context = nullptr, void* error_addr = nullptr) { - load_here(depth + 8); + load_here(depth + 8, context, error_addr); for (size_t i = 0; i < _stacktrace.size(); ++i) { if (_stacktrace[i] == addr) { @@ -897,14 +942,182 @@ class StackTraceImpl : public StackTraceImplHolder { }; }; +#elif BACKWARD_HAS_LIBUNWIND == 1 + +template <> +class StackTraceImpl : public StackTraceImplHolder { +public: + __attribute__((noinline)) size_t load_here(size_t depth = 32, void* _context = nullptr, void* _error_addr = nullptr) + { + set_context(_context); + set_error_addr(_error_addr); + load_thread_info(); + if (depth == 0) { + return 0; + } + _stacktrace.resize(depth + 1); + + int result = 0; + + unw_context_t ctx; + size_t index = 0; + + // Add the tail call. If the Instruction Pointer is the crash address it + // means we got a bad function pointer dereference, so we "unwind" the + // bad pointer manually by using the return address pointed to by the + // Stack Pointer as the Instruction Pointer and letting libunwind do + // the rest + + if (context()) { + ucontext_t* uctx = reinterpret_cast(context()); +# ifdef REG_RIP // x86_64 + if (uctx->uc_mcontext.gregs[REG_RIP] == reinterpret_cast(error_addr())) { + uctx->uc_mcontext.gregs[REG_RIP] = *reinterpret_cast(uctx->uc_mcontext.gregs[REG_RSP]); + } + _stacktrace[index] = reinterpret_cast(uctx->uc_mcontext.gregs[REG_RIP]); + ++index; + ctx = *reinterpret_cast(uctx); +# elif defined(REG_EIP) // x86_32 + if (uctx->uc_mcontext.gregs[REG_EIP] == reinterpret_cast(error_addr())) { + uctx->uc_mcontext.gregs[REG_EIP] = *reinterpret_cast(uctx->uc_mcontext.gregs[REG_ESP]); + } + _stacktrace[index] = reinterpret_cast(uctx->uc_mcontext.gregs[REG_EIP]); + ++index; + ctx = *reinterpret_cast(uctx); +# elif defined(__arm__) + // libunwind uses its own context type for ARM unwinding. 
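The size() change above guards against unsigned underflow when skip_n_firsts() exceeds the number of captured frames. A tiny demonstration of why the clamp matters:

#include <cstddef>
#include <iostream>
#include <vector>

// With size_t, "frames - skip" wraps around when skip > frames; the patched size() clamps to 0.
size_t visible_frames(std::vector<void*> const& stacktrace, size_t skip_n_firsts)
{
    return (stacktrace.size() >= skip_n_firsts) ? stacktrace.size() - skip_n_firsts : 0;
}

int main()
{
    std::vector<void*> frames(3);
    std::cout << visible_frames(frames, 2) << "\n"; // 1
    std::cout << visible_frames(frames, 5) << "\n"; // 0, not a huge wrapped value
    return 0;
}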
+ // Copy the registers from the signal handler's context so we can + // unwind + unw_getcontext(&ctx); + ctx.regs[UNW_ARM_R0] = uctx->uc_mcontext.arm_r0; + ctx.regs[UNW_ARM_R1] = uctx->uc_mcontext.arm_r1; + ctx.regs[UNW_ARM_R2] = uctx->uc_mcontext.arm_r2; + ctx.regs[UNW_ARM_R3] = uctx->uc_mcontext.arm_r3; + ctx.regs[UNW_ARM_R4] = uctx->uc_mcontext.arm_r4; + ctx.regs[UNW_ARM_R5] = uctx->uc_mcontext.arm_r5; + ctx.regs[UNW_ARM_R6] = uctx->uc_mcontext.arm_r6; + ctx.regs[UNW_ARM_R7] = uctx->uc_mcontext.arm_r7; + ctx.regs[UNW_ARM_R8] = uctx->uc_mcontext.arm_r8; + ctx.regs[UNW_ARM_R9] = uctx->uc_mcontext.arm_r9; + ctx.regs[UNW_ARM_R10] = uctx->uc_mcontext.arm_r10; + ctx.regs[UNW_ARM_R11] = uctx->uc_mcontext.arm_fp; + ctx.regs[UNW_ARM_R12] = uctx->uc_mcontext.arm_ip; + ctx.regs[UNW_ARM_R13] = uctx->uc_mcontext.arm_sp; + ctx.regs[UNW_ARM_R14] = uctx->uc_mcontext.arm_lr; + ctx.regs[UNW_ARM_R15] = uctx->uc_mcontext.arm_pc; + + // If we have crashed in the PC use the LR instead, as this was + // a bad function dereference + if (reinterpret_cast(error_addr()) == uctx->uc_mcontext.arm_pc) { + ctx.regs[UNW_ARM_R15] = uctx->uc_mcontext.arm_lr - sizeof(unsigned long); + } + _stacktrace[index] = reinterpret_cast(ctx.regs[UNW_ARM_R15]); + ++index; +# elif defined(__APPLE__) && defined(__x86_64__) + unw_getcontext(&ctx); + // OS X's implementation of libunwind uses its own context object + // so we need to convert the passed context to libunwind's format + // (information about the data layout taken from unw_getcontext.s + // in Apple's libunwind source + ctx.data[0] = uctx->uc_mcontext->__ss.__rax; + ctx.data[1] = uctx->uc_mcontext->__ss.__rbx; + ctx.data[2] = uctx->uc_mcontext->__ss.__rcx; + ctx.data[3] = uctx->uc_mcontext->__ss.__rdx; + ctx.data[4] = uctx->uc_mcontext->__ss.__rdi; + ctx.data[5] = uctx->uc_mcontext->__ss.__rsi; + ctx.data[6] = uctx->uc_mcontext->__ss.__rbp; + ctx.data[7] = uctx->uc_mcontext->__ss.__rsp; + ctx.data[8] = uctx->uc_mcontext->__ss.__r8; + ctx.data[9] = uctx->uc_mcontext->__ss.__r9; + ctx.data[10] = uctx->uc_mcontext->__ss.__r10; + ctx.data[11] = uctx->uc_mcontext->__ss.__r11; + ctx.data[12] = uctx->uc_mcontext->__ss.__r12; + ctx.data[13] = uctx->uc_mcontext->__ss.__r13; + ctx.data[14] = uctx->uc_mcontext->__ss.__r14; + ctx.data[15] = uctx->uc_mcontext->__ss.__r15; + ctx.data[16] = uctx->uc_mcontext->__ss.__rip; + + // If the IP is the same as the crash address we have a bad function + // dereference The caller's address is pointed to by %rsp, so we + // dereference that value and set it to be the next frame's IP. 
+ if (uctx->uc_mcontext->__ss.__rip == reinterpret_cast<__uint64_t>(error_addr())) { + ctx.data[16] = *reinterpret_cast<__uint64_t*>(uctx->uc_mcontext->__ss.__rsp); + } + _stacktrace[index] = reinterpret_cast(ctx.data[16]); + ++index; +# elif defined(__APPLE__) + unw_getcontext(&ctx) + // TODO: Convert the ucontext_t to libunwind's unw_context_t like + // we do in 64 bits + if (ctx.uc_mcontext->__ss.__eip == reinterpret_cast(error_addr())) + { + ctx.uc_mcontext->__ss.__eip = ctx.uc_mcontext->__ss.__esp; + } + _stacktrace[index] = reinterpret_cast(ctx.uc_mcontext->__ss.__eip); + ++index; +# endif + } + + unw_cursor_t cursor; + if (context()) { +# if defined(UNW_INIT_SIGNAL_FRAME) + result = unw_init_local2(&cursor, &ctx, UNW_INIT_SIGNAL_FRAME); +# else + result = unw_init_local(&cursor, &ctx); +# endif + } + else { + unw_getcontext(&ctx); + ; + result = unw_init_local(&cursor, &ctx); + } + + if (result != 0) + return 1; + + unw_word_t ip = 0; + + while (index <= depth && unw_step(&cursor) > 0) { + result = unw_get_reg(&cursor, UNW_REG_IP, &ip); + if (result == 0) { + _stacktrace[index] = reinterpret_cast(--ip); + ++index; + } + } + --index; + + _stacktrace.resize(index + 1); + skip_n_firsts(0); + return size(); + } + + size_t load_from(void* addr, size_t depth = 32, void* context = nullptr, void* error_addr = nullptr) + { + load_here(depth + 8, context, error_addr); + + for (size_t i = 0; i < _stacktrace.size(); ++i) { + if (_stacktrace[i] == addr) { + skip_n_firsts(i); + _stacktrace[i] = (void*)((uintptr_t)_stacktrace[i]); + break; + } + } + + _stacktrace.resize(std::min(_stacktrace.size(), skip_n_firsts() + depth)); + return size(); + } +}; + #elif defined(BACKWARD_HAS_BACKTRACE) template <> class StackTraceImpl : public StackTraceImplHolder { public: NOINLINE - size_t load_here(size_t depth = 32) + size_t load_here(size_t depth = 32, void* context = nullptr, void* error_addr = nullptr) { + set_context(context); + set_error_addr(error_addr); load_thread_info(); if (depth == 0) { return 0; @@ -916,9 +1129,9 @@ class StackTraceImpl : public StackTraceImplHolder { return size(); } - size_t load_from(void* addr, size_t depth = 32) + size_t load_from(void* addr, size_t depth = 32, void* context = nullptr, void* error_addr = nullptr) { - load_here(depth + 8); + load_here(depth + 8, context, error_addr); for (size_t i = 0; i < _stacktrace.size(); ++i) { if (_stacktrace[i] == addr) { @@ -945,9 +1158,10 @@ class StackTraceImpl : public StackTraceImplHolder { void set_thread_handle(HANDLE handle) { thd_ = handle; } NOINLINE - size_t load_here(size_t depth = 32) + size_t load_here(size_t depth = 32, void* context = nullptr, void* error_addr = nullptr) { - + set_context(static_cast(context)); + set_error_addr(error_addr); CONTEXT localCtx; // used when no context is provided if (depth == 0) { @@ -1009,9 +1223,9 @@ class StackTraceImpl : public StackTraceImplHolder { return size(); } - size_t load_from(void* addr, size_t depth = 32) + size_t load_from(void* addr, size_t depth = 32, void* context = nullptr, void* error_addr = nullptr) { - load_here(depth + 8); + load_here(depth + 8, context, error_addr); for (size_t i = 0; i < _stacktrace.size(); ++i) { if (_stacktrace[i] == addr) { @@ -1037,24 +1251,24 @@ class StackTrace : public StackTraceImpl { /*************** TRACE RESOLVER ***************/ -template -class TraceResolverImpl; +class TraceResolverImplBase { +public: + virtual ~TraceResolverImplBase() {} -#ifdef BACKWARD_SYSTEM_UNKNOWN + virtual void load_addresses(void* const* addresses, int 
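The new BACKWARD_HAS_LIBUNWIND backend ultimately drives the standard libunwind cursor loop shown above. A self-contained sketch of those primitives (local unwinding only; build with -lunwind), separate from the library's implementation:

#define UNW_LOCAL_ONLY
#include <libunwind.h>
#include <cstdio>

// Walk the current call stack and print each frame's instruction pointer.
void print_backtrace()
{
    unw_context_t context;
    unw_cursor_t  cursor;
    unw_getcontext(&context);
    unw_init_local(&cursor, &context);

    while (unw_step(&cursor) > 0) {
        unw_word_t ip = 0;
        if (unw_get_reg(&cursor, UNW_REG_IP, &ip) == 0) {
            std::printf("  ip=%p\n", reinterpret_cast<void*>(ip));
        }
    }
}

int main()
{
    print_backtrace();
    return 0;
}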
address_count) + { + (void)addresses; + (void)address_count; + } -template <> -class TraceResolverImpl { -public: template - void load_stacktrace(ST&) + void load_stacktrace(ST& st) { + load_addresses(st.begin(), (int)st.size()); } - ResolvedTrace resolve(ResolvedTrace t) { return t; } -}; -#endif + virtual ResolvedTrace resolve(ResolvedTrace t) { return t; } -class TraceResolverImplBase { protected: std::string demangle(const char* funcname) { return _demangler.demangle(funcname); } @@ -1062,6 +1276,17 @@ class TraceResolverImplBase { details::demangler _demangler; }; +template +class TraceResolverImpl; + +#ifdef BACKWARD_SYSTEM_UNKNOWN + +template <> +class TraceResolverImpl : public TraceResolverImplBase { +}; + +#endif + #ifdef BACKWARD_SYSTEM_LINUX class TraceResolverLinuxBase : public TraceResolverImplBase { @@ -1078,6 +1303,12 @@ class TraceResolverLinuxBase : public TraceResolverImplBase { // variable; In that case, we actually open /proc/self/exe, which // is always the actual executable (even if it was deleted/replaced!) // but display the path that /proc/self/exe links to. + // However, this right away reduces probability of successful symbol + // resolution, because libbfd may try to find *.debug files in the + // same dir, in case symbols are stripped. As a result, it may try + // to find a file /proc/self/.debug, which obviously does + // not exist. /proc/self/exe is a last resort. First load attempt + // should go for the original executable file path. symbol_info.dli_fname = "/proc/self/exe"; return exec_path_; } @@ -1129,17 +1360,15 @@ class TraceResolverLinuxImpl; template <> class TraceResolverLinuxImpl : public TraceResolverLinuxBase { public: - template - void load_stacktrace(ST& st) + void load_addresses(void* const* addresses, int address_count) override { - using namespace details; - if (st.size() == 0) { + if (address_count == 0) { return; } - _symbols.reset(backtrace_symbols(st.begin(), (int)st.size())); + _symbols.reset(backtrace_symbols(addresses, address_count)); } - ResolvedTrace resolve(ResolvedTrace trace) + ResolvedTrace resolve(ResolvedTrace trace) override { char* filename = _symbols[trace.idx]; char* funcname = filename; @@ -1177,12 +1406,7 @@ class TraceResolverLinuxImpl : public TraceResolverL public: TraceResolverLinuxImpl() : _bfd_loaded(false) {} - template - void load_stacktrace(ST&) - { - } - - ResolvedTrace resolve(ResolvedTrace trace) + ResolvedTrace resolve(ResolvedTrace trace) override { Dl_info symbol_info; @@ -1213,9 +1437,45 @@ class TraceResolverLinuxImpl : public TraceResolverL } trace.object_filename = resolve_exec_path(symbol_info); - bfd_fileobject& fobj = load_object_with_bfd(symbol_info.dli_fname); - if (!fobj.handle) { - return trace; // sad, we couldn't load the object :( + bfd_fileobject* fobj; + // Before rushing to resolution need to ensure the executable + // file still can be used. For that compare inode numbers of + // what is stored by the executable's file path, and in the + // dli_fname, which not necessarily equals to the executable. + // It can be a shared library, or /proc/self/exe, and in the + // latter case has drawbacks. See the exec path resolution for + // details. In short - the dli object should be used only as + // the last resort. + // If inode numbers are equal, it is known dli_fname and the + // executable file are the same. This is guaranteed by Linux, + // because if the executable file is changed/deleted, it will + // be done in a new inode. 
The old file will be preserved in + // /proc/self/exe, and may even have inode 0. The latter can + // happen if the inode was actually reused, and the file was + // kept only in the main memory. + // + struct stat obj_stat; + struct stat dli_stat; + if (stat(trace.object_filename.c_str(), &obj_stat) == 0 && stat(symbol_info.dli_fname, &dli_stat) == 0 && + obj_stat.st_ino == dli_stat.st_ino) { + // The executable file, and the shared object containing the + // address are the same file. Safe to use the original path. + // this is preferable. Libbfd will search for stripped debug + // symbols in the same directory. + fobj = load_object_with_bfd(trace.object_filename); + } + else { + // The original object file was *deleted*! The only hope is + // that the debug symbols are either inside the shared + // object file, or are in the same directory, and this is + // not /proc/self/exe. + fobj = nullptr; + } + if (fobj == nullptr || !fobj->handle) { + fobj = load_object_with_bfd(symbol_info.dli_fname); + if (!fobj->handle) { + return trace; + } } find_sym_result* details_selected; // to be filled. @@ -1346,7 +1606,7 @@ class TraceResolverLinuxImpl : public TraceResolverL typedef details::hashtable::type fobj_bfd_map_t; fobj_bfd_map_t _fobj_bfd_map; - bfd_fileobject& load_object_with_bfd(const std::string& filename_object) + bfd_fileobject* load_object_with_bfd(const std::string& filename_object) { using namespace details; @@ -1358,11 +1618,11 @@ class TraceResolverLinuxImpl : public TraceResolverL fobj_bfd_map_t::iterator it = _fobj_bfd_map.find(filename_object); if (it != _fobj_bfd_map.end()) { - return it->second; + return &it->second; } // this new object is empty for now. - bfd_fileobject& r = _fobj_bfd_map[filename_object]; + bfd_fileobject* r = &_fobj_bfd_map[filename_object]; // we do the work temporary in this one; bfd_handle_t bfd_handle; @@ -1407,9 +1667,9 @@ class TraceResolverLinuxImpl : public TraceResolverL return r; // damned, that's a stripped file that you got there! 
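The inode comparison introduced above decides whether the executable's on-disk path can still be trusted before handing it to libbfd. The same check in isolation (Linux/POSIX stat; a sketch, not the resolver code):

#include <sys/stat.h>
#include <cstdio>

// True when both paths currently refer to the same inode, i.e. the executable on disk
// has not been deleted or replaced since the process started.
bool same_inode(const char* exec_path, const char* dli_fname)
{
    struct stat a {};
    struct stat b {};
    return stat(exec_path, &a) == 0 && stat(dli_fname, &b) == 0 && a.st_ino == b.st_ino;
}

int main()
{
    // Trivial demo: /proc/self/exe resolves to the running binary on Linux.
    std::printf("%d\n", same_inode("/proc/self/exe", "/proc/self/exe"));
    return 0;
}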
} - r.handle = move(bfd_handle); - r.symtab = move(symtab); - r.dynamic_symtab = move(dynamic_symtab); + r->handle = move(bfd_handle); + r->symtab = move(symtab); + r->dynamic_symtab = move(dynamic_symtab); return r; } @@ -1428,15 +1688,15 @@ class TraceResolverLinuxImpl : public TraceResolverL find_sym_result result; }; - find_sym_result find_symbol_details(bfd_fileobject& fobj, void* addr, void* base_addr) + find_sym_result find_symbol_details(bfd_fileobject* fobj, void* addr, void* base_addr) { find_sym_context context; context.self = this; - context.fobj = &fobj; + context.fobj = fobj; context.addr = addr; context.base_addr = base_addr; context.result.found = false; - bfd_map_over_sections(fobj.handle.get(), &find_in_section_trampoline, static_cast(&context)); + bfd_map_over_sections(fobj->handle.get(), &find_in_section_trampoline, static_cast(&context)); return context.result; } @@ -1446,7 +1706,7 @@ class TraceResolverLinuxImpl : public TraceResolverL context->self->find_in_section( reinterpret_cast(context->addr), reinterpret_cast(context->base_addr), - *context->fobj, + context->fobj, section, context->result); } @@ -1454,7 +1714,7 @@ class TraceResolverLinuxImpl : public TraceResolverL void find_in_section( bfd_vma addr, bfd_vma base_addr, - bfd_fileobject& fobj, + bfd_fileobject* fobj, asection* section, find_sym_result& result) { @@ -1462,14 +1722,14 @@ class TraceResolverLinuxImpl : public TraceResolverL return; # ifdef bfd_get_section_flags - if ((bfd_get_section_flags(fobj.handle.get(), section) & SEC_ALLOC) == 0) + if ((bfd_get_section_flags(fobj->handle.get(), section) & SEC_ALLOC) == 0) # else if ((bfd_section_flags(section) & SEC_ALLOC) == 0) # endif return; // a debug section is never loaded automatically. # ifdef bfd_get_section_vma - bfd_vma sec_addr = bfd_get_section_vma(fobj.handle.get(), section); + bfd_vma sec_addr = bfd_get_section_vma(fobj->handle.get(), section); # else bfd_vma sec_addr = bfd_section_vma(section); # endif @@ -1491,22 +1751,22 @@ class TraceResolverLinuxImpl : public TraceResolverL # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" # endif - if (!result.found && fobj.symtab) { + if (!result.found && fobj->symtab) { result.found = bfd_find_nearest_line( - fobj.handle.get(), + fobj->handle.get(), section, - fobj.symtab.get(), + fobj->symtab.get(), addr - sec_addr, &result.filename, &result.funcname, &result.line); } - if (!result.found && fobj.dynamic_symtab) { + if (!result.found && fobj->dynamic_symtab) { result.found = bfd_find_nearest_line( - fobj.handle.get(), + fobj->handle.get(), section, - fobj.dynamic_symtab.get(), + fobj->dynamic_symtab.get(), addr - sec_addr, &result.filename, &result.funcname, @@ -1517,14 +1777,14 @@ class TraceResolverLinuxImpl : public TraceResolverL # endif } - ResolvedTrace::source_locs_t backtrace_inliners(bfd_fileobject& fobj, find_sym_result previous_result) + ResolvedTrace::source_locs_t backtrace_inliners(bfd_fileobject* fobj, find_sym_result previous_result) { // This function can be called ONLY after a SUCCESSFUL call to // find_symbol_details. The state is global to the bfd_handle. 
ResolvedTrace::source_locs_t results; while (previous_result.found) { find_sym_result result; - result.found = bfd_find_inliner_info(fobj.handle.get(), &result.filename, &result.funcname, &result.line); + result.found = bfd_find_inliner_info(fobj->handle.get(), &result.filename, &result.funcname, &result.line); if (result.found) /* and not ( cstrings_eq(previous_result.filename, @@ -1565,12 +1825,7 @@ class TraceResolverLinuxImpl : public TraceResolverLi public: TraceResolverLinuxImpl() : _dwfl_handle_initialized(false) {} - template - void load_stacktrace(ST&) - { - } - - ResolvedTrace resolve(ResolvedTrace trace) + ResolvedTrace resolve(ResolvedTrace trace) override { using namespace details; @@ -1861,9 +2116,9 @@ class TraceResolverLinuxImpl : public TraceResolverLi static const char* die_call_file(Dwarf_Die* die) { Dwarf_Attribute attr_mem; - Dwarf_Sword file_idx = 0; + Dwarf_Word file_idx = 0; - dwarf_formsdata(dwarf_attr(die, DW_AT_call_file, &attr_mem), &file_idx); + dwarf_formudata(dwarf_attr(die, DW_AT_call_file, &attr_mem), &file_idx); if (file_idx == 0) { return 0; @@ -1894,12 +2149,7 @@ class TraceResolverLinuxImpl : public TraceResolve public: TraceResolverLinuxImpl() : _dwarf_loaded(false) {} - template - void load_stacktrace(ST&) - { - } - - ResolvedTrace resolve(ResolvedTrace trace) + ResolvedTrace resolve(ResolvedTrace trace) override { // trace.addr is a virtual address in memory pointing to some code. // Let's try to find from which loaded object it comes from. @@ -2901,7 +3151,7 @@ class TraceResolverLinuxImpl : public TraceResolve trace.object_function = demangler.demangle(linkage); dwarf_dealloc(dwarf, linkage, DW_DLA_STRING); } - dwarf_dealloc(dwarf, name, DW_DLA_ATTR); + dwarf_dealloc(dwarf, attr_mem, DW_DLA_ATTR); } break; @@ -3112,12 +3362,12 @@ class TraceResolverLinuxImpl : public TraceResolve { Dwarf_Attribute attr_mem; Dwarf_Error error = DW_DLE_NE; - Dwarf_Signed file_index; + Dwarf_Unsigned file_index; std::string file; if (dwarf_attr(die, DW_AT_call_file, &attr_mem, &error) == DW_DLV_OK) { - if (dwarf_formsdata(attr_mem, &file_index, &error) != DW_DLV_OK) { + if (dwarf_formudata(attr_mem, &file_index, &error) != DW_DLV_OK) { file_index = 0; } dwarf_dealloc(dwarf, attr_mem, DW_DLA_ATTR); @@ -3129,8 +3379,9 @@ class TraceResolverLinuxImpl : public TraceResolve char** srcfiles = 0; Dwarf_Signed file_count = 0; if (dwarf_srcfiles(cu_die, &srcfiles, &file_count, &error) == DW_DLV_OK) { - if (file_index <= file_count) + if (file_count > 0 && file_index <= static_cast(file_count)) { file = std::string(srcfiles[file_index - 1]); + } // Deallocate all strings! 
for (int i = 0; i < file_count; ++i) { @@ -3259,17 +3510,15 @@ class TraceResolverDarwinImpl; template <> class TraceResolverDarwinImpl : public TraceResolverImplBase { public: - template - void load_stacktrace(ST& st) + void load_addresses(void* const* addresses, int address_count) override { - using namespace details; - if (st.size() == 0) { + if (address_count == 0) { return; } - _symbols.reset(backtrace_symbols(st.begin(), st.size())); + _symbols.reset(backtrace_symbols(addresses, address_count)); } - ResolvedTrace resolve(ResolvedTrace trace) + ResolvedTrace resolve(ResolvedTrace trace) override { // parse: // + @@ -3368,9 +3617,9 @@ class get_mod_info { ret.base_address = mi.lpBaseOfDll; ret.load_size = mi.SizeOfImage; - GetModuleFileNameEx(process, module, temp, sizeof(temp)); + GetModuleFileNameExA(process, module, temp, sizeof(temp)); ret.image_name = temp; - GetModuleBaseName(process, module, temp, sizeof(temp)); + GetModuleBaseNameA(process, module, temp, sizeof(temp)); ret.module_name = temp; std::vector img(ret.image_name.begin(), ret.image_name.end()); std::vector mod(ret.module_name.begin(), ret.module_name.end()); @@ -3380,7 +3629,7 @@ class get_mod_info { }; template <> -class TraceResolverImpl { +class TraceResolverImpl : public TraceResolverImplBase { public: TraceResolverImpl() { @@ -3404,11 +3653,6 @@ class TraceResolverImpl { image_type = h->FileHeader.Machine; } - template - void load_stacktrace(ST&) - { - } - static const int max_sym_len = 255; struct symbol_t { SYMBOL_INFO sym; @@ -3417,31 +3661,32 @@ class TraceResolverImpl { DWORD64 displacement; - ResolvedTrace resolve(ResolvedTrace t) + ResolvedTrace resolve(ResolvedTrace t) override { HANDLE process = GetCurrentProcess(); char name[256]; - memset(&sym, 0, sizeof sym); + memset(&sym, 0, sizeof(sym)); sym.sym.SizeOfStruct = sizeof(SYMBOL_INFO); sym.sym.MaxNameLen = max_sym_len; if (!SymFromAddr(process, (ULONG64)t.addr, &displacement, &sym.sym)) { // TODO: error handling everywhere - LPTSTR lpMsgBuf; + char* lpMsgBuf; DWORD dw = GetLastError(); - FormatMessage( - FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, - NULL, - dw, - MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), - (LPTSTR)&lpMsgBuf, - 0, - NULL); - - printf(lpMsgBuf); + if (FormatMessageA( + FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, + dw, + MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + (char*)&lpMsgBuf, + 0, + NULL)) { + std::fprintf(stderr, "%s\n", lpMsgBuf); + LocalFree(lpMsgBuf); + } // abort(); } @@ -3691,7 +3936,7 @@ class cfile_streambuf : public std::streambuf { int_type underflow() override { return traits_type::eof(); } int_type overflow(int_type ch) override { - if (traits_type::not_eof(ch) && fwrite(&ch, sizeof ch, 1, sink) == 1) { + if (traits_type::not_eof(ch) && fputc(ch, sink) != EOF) { return ch; } return traits_type::eof(); @@ -4029,11 +4274,17 @@ class SignalHandling { # elif defined(__arm__) error_addr = reinterpret_cast(uctx->uc_mcontext.arm_pc); # elif defined(__aarch64__) +# if defined(__APPLE__) + error_addr = reinterpret_cast(uctx->uc_mcontext->__ss.__pc); +# else error_addr = reinterpret_cast(uctx->uc_mcontext.pc); +# endif # elif defined(__mips__) error_addr = reinterpret_cast(reinterpret_cast(&uctx->uc_mcontext)->sc_pc); # elif defined(__ppc__) || defined(__powerpc) || defined(__powerpc__) || defined(__POWERPC__) error_addr = reinterpret_cast(uctx->uc_mcontext.regs->nip); +# elif defined(__riscv) + error_addr = 
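The cfile_streambuf::overflow change above writes exactly one character with fputc instead of fwrite-ing sizeof(int_type) bytes (which pushed stray bytes into the stream). A stand-alone version of the fixed streambuf, usable with any FILE*:

#include <cstdio>
#include <ostream>
#include <streambuf>

// Unbuffered streambuf that forwards every character to a C FILE*.
class cfile_streambuf : public std::streambuf {
public:
    explicit cfile_streambuf(std::FILE* sink) : sink_(sink) {}

protected:
    int_type overflow(int_type ch) override
    {
        if (traits_type::not_eof(ch) && std::fputc(ch, sink_) != EOF) {
            return ch; // one character written, as in the patched overflow()
        }
        return traits_type::eof();
    }

private:
    std::FILE* sink_;
};

int main()
{
    cfile_streambuf buf(stderr);
    std::ostream out(&buf);
    out << "hello from a FILE*-backed ostream\n" << std::flush;
    return 0;
}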
reinterpret_cast(uctx->uc_mcontext.__gregs[REG_PC]); # elif defined(__s390x__) error_addr = reinterpret_cast(uctx->uc_mcontext.psw.addr); # elif defined(__APPLE__) && defined(__x86_64__) @@ -4044,10 +4295,10 @@ class SignalHandling { # warning ":/ sorry, ain't know no nothing none not of your architecture!" # endif if (error_addr) { - st.load_from(error_addr, 32); + st.load_from(error_addr, 32, reinterpret_cast(uctx), info->si_addr); } else { - st.load_here(32); + st.load_here(32, reinterpret_cast(uctx), info->si_addr); } Printer printer; @@ -4117,8 +4368,10 @@ class SignalHandling { signal(SIGABRT, signal_handler); _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT); - set_terminate(&terminator); - set_unexpected(&terminator); + std::set_terminate(&terminator); +# ifndef BACKWARD_ATLEAST_CXX17 + std::set_unexpected(&terminator); +# endif _set_purecall_handler(&terminator); _set_invalid_parameter_handler(&invalid_parameter_handler); } @@ -4266,9 +4519,8 @@ class SignalHandling { StackTrace st; st.set_machine_type(printer.resolver().machine_type()); - st.set_context(ctx()); st.set_thread_handle(thread_handle()); - st.load_here(32 + skip_frames); + st.load_here(32 + skip_frames, ctx()); st.skip_n_firsts(skip_frames); printer.address = true; @@ -4291,5 +4543,6 @@ class SignalHandling { } // namespace backward +#endif /* no_backtrace */ + #endif /* H_GUARD */ -// clang-format on diff --git a/src/runtime.cpp b/src/runtime.cpp index 9175084..1013a19 100644 --- a/src/runtime.cpp +++ b/src/runtime.cpp @@ -25,6 +25,7 @@ #include #include #include +#include #include // for strtoul #include @@ -61,7 +62,7 @@ static size_t write_data(char* ptr, size_t size, size_t nmemb, void* userdata) return 0; } - auto const resp = static_cast(userdata); + auto* const resp = static_cast(userdata); assert(size == 1); (void)size; // avoid warning in release builds assert(resp); @@ -110,15 +111,13 @@ static size_t write_header(char* ptr, size_t size, size_t nmemb, void* userdata) logging::log_debug(LOG_TAG, "received header: %s", std::string(ptr, nmemb).c_str()); - auto const resp = static_cast(userdata); + auto* const resp = static_cast(userdata); assert(resp); for (size_t i = 0; i < nmemb; i++) { if (ptr[i] != ':') { continue; } - std::string key{ptr, i}; - std::string value{ptr + i + 1, nmemb - i - 1}; - resp->add_header(trim(key), trim(value)); + resp->add_header(trim({ptr, i}), trim({ptr + i + 1, nmemb - i - 1})); break; } return size * nmemb; @@ -127,7 +126,7 @@ static size_t write_header(char* ptr, size_t size, size_t nmemb, void* userdata) static size_t read_data(char* buffer, size_t size, size_t nitems, void* userdata) { auto const limit = size * nitems; - auto ctx = static_cast*>(userdata); + auto* ctx = static_cast*>(userdata); assert(ctx); auto const unread = ctx->first.length() - ctx->second; if (0 == unread) { @@ -135,12 +134,16 @@ static size_t read_data(char* buffer, size_t size, size_t nitems, void* userdata } if (unread <= limit) { - std::copy_n(ctx->first.begin() + ctx->second, unread, buffer); + auto from = ctx->first.begin(); + std::advance(from, ctx->second); + std::copy_n(from, unread, buffer); ctx->second += unread; return unread; } - std::copy_n(ctx->first.begin() + ctx->second, limit, buffer); + auto from = ctx->first.begin(); + std::advance(from, ctx->second); + std::copy_n(from, limit, buffer); ctx->second += limit; return limit; } @@ -160,9 +163,11 @@ static int rt_curl_debug_callback(CURL* handle, curl_infotype type, char* data, runtime::runtime(std::string const& endpoint) : 
runtime(endpoint, "AWS_Lambda_Cpp/" + std::string(get_version())) {} runtime::runtime(std::string const& endpoint, std::string const& user_agent) - : m_user_agent_header("User-Agent: " + user_agent), m_endpoints{{endpoint + "/2018-06-01/runtime/init/error", - endpoint + "/2018-06-01/runtime/invocation/next", - endpoint + "/2018-06-01/runtime/invocation/"}}, + : m_user_agent_header("User-Agent: " + user_agent), + m_endpoints{ + {endpoint + "/2018-06-01/runtime/init/error", + endpoint + "/2018-06-01/runtime/invocation/next", + endpoint + "/2018-06-01/runtime/invocation/"}}, m_curl_handle(curl_easy_init()) { if (!m_curl_handle) { @@ -240,7 +245,10 @@ runtime::next_outcome runtime::get_next() if (curl_code != CURLE_OK) { logging::log_debug(LOG_TAG, "CURL returned error code %d - %s", curl_code, curl_easy_strerror(curl_code)); - logging::log_error(LOG_TAG, "Failed to get next invocation. No Response from endpoint"); + logging::log_error( + LOG_TAG, + "Failed to get next invocation. No Response from endpoint \"%s\"", + m_endpoints[Endpoints::NEXT].c_str()); return aws::http::response_code::REQUEST_NOT_MADE; } @@ -264,32 +272,38 @@ runtime::next_outcome runtime::get_next() return resp.get_response_code(); } - if (!resp.has_header(REQUEST_ID_HEADER)) { + auto out = resp.get_header(REQUEST_ID_HEADER); + if (!out.is_success()) { logging::log_error(LOG_TAG, "Failed to find header %s in response", REQUEST_ID_HEADER); return aws::http::response_code::REQUEST_NOT_MADE; } invocation_request req; req.payload = resp.get_body(); - req.request_id = resp.get_header(REQUEST_ID_HEADER); + req.request_id = std::move(out).get_result(); - if (resp.has_header(TRACE_ID_HEADER)) { - req.xray_trace_id = resp.get_header(TRACE_ID_HEADER); + out = resp.get_header(TRACE_ID_HEADER); + if (out.is_success()) { + req.xray_trace_id = std::move(out).get_result(); } - if (resp.has_header(CLIENT_CONTEXT_HEADER)) { - req.client_context = resp.get_header(CLIENT_CONTEXT_HEADER); + out = resp.get_header(CLIENT_CONTEXT_HEADER); + if (out.is_success()) { + req.client_context = std::move(out).get_result(); } - if (resp.has_header(COGNITO_IDENTITY_HEADER)) { - req.cognito_identity = resp.get_header(COGNITO_IDENTITY_HEADER); + out = resp.get_header(COGNITO_IDENTITY_HEADER); + if (out.is_success()) { + req.cognito_identity = std::move(out).get_result(); } - if (resp.has_header(FUNCTION_ARN_HEADER)) { - req.function_arn = resp.get_header(FUNCTION_ARN_HEADER); + out = resp.get_header(FUNCTION_ARN_HEADER); + if (out.is_success()) { + req.function_arn = std::move(out).get_result(); } - if (resp.has_header(DEADLINE_MS_HEADER)) { - auto const& deadline_string = resp.get_header(DEADLINE_MS_HEADER); + out = resp.get_header(DEADLINE_MS_HEADER); + if (out.is_success()) { + auto const& deadline_string = std::move(out).get_result(); constexpr int base = 10; unsigned long ms = strtoul(deadline_string.c_str(), nullptr, base); assert(ms > 0); @@ -301,7 +315,7 @@ runtime::next_outcome runtime::get_next() req.payload.c_str(), static_cast(req.get_time_remaining().count())); } - return next_outcome(req); + return {req}; } runtime::post_outcome runtime::post_success(std::string const& request_id, invocation_response const& handler_response) @@ -396,7 +410,7 @@ void run_handler(std::function c { logging::log_info(LOG_TAG, "Initializing the C++ Lambda Runtime version %s", aws::lambda_runtime::get_version()); std::string endpoint("http://"); - if (auto ep = std::getenv("AWS_LAMBDA_RUNTIME_API")) { + if (auto* ep = std::getenv("AWS_LAMBDA_RUNTIME_API")) { 
assert(ep); logging::log_debug(LOG_TAG, "LAMBDA_SERVER_ADDRESS defined in environment as: %s", ep); endpoint += ep; @@ -487,7 +501,7 @@ static std::string json_escape(std::string const& in) // escape and print as unicode codepoint constexpr int printed_unicode_length = 6; // 4 hex + letter 'u' + \0 std::array buf; - sprintf(buf.data(), "u%04x", ch); + snprintf(buf.data(), buf.size(), "u%04x", ch); out.append(buf.data(), buf.size() - 1); // add only five, discarding the null terminator. break; } @@ -497,12 +511,12 @@ static std::string json_escape(std::string const& in) } AWS_LAMBDA_RUNTIME_API -invocation_response invocation_response::success(std::string const& payload, std::string const& content_type) +invocation_response invocation_response::success(std::string payload, std::string content_type) { invocation_response r; r.m_success = true; - r.m_content_type = content_type; - r.m_payload = payload; + r.m_content_type = std::move(content_type); + r.m_payload = std::move(payload); return r; } @@ -513,7 +527,7 @@ invocation_response invocation_response::failure(std::string const& error_messag r.m_success = false; r.m_content_type = "application/json"; r.m_payload = R"({"errorMessage":")" + json_escape(error_message) + R"(","errorType":")" + json_escape(error_type) + - R"(", "stackTrace":[]})"; + R"(","stackTrace":[]})"; return r; } diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index ddec1a8..7406096 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,16 +1,50 @@ +cmake_minimum_required(VERSION 3.11) project(aws-lambda-runtime-tests LANGUAGES CXX) -find_package(AWSSDK COMPONENTS lambda iam) -add_executable(${PROJECT_NAME} - main.cpp - runtime_tests.cpp - version_tests.cpp - gtest/gtest-all.cc) +if(DEFINED ENV{GITHUB_ACTIONS}) + # Fetch Google Test for unit tests + include(FetchContent) + FetchContent_Declare(gtest + URL https://github.com/google/googletest/archive/v1.12.0.tar.gz + DOWNLOAD_EXTRACT_TIMESTAMP TRUE + ) + # Configure build of googletest + set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) + set(BUILD_GMOCK OFF CACHE BOOL "" FORCE) + set(INSTALL_GTEST OFF) + FetchContent_MakeAvailable(gtest) -target_link_libraries(${PROJECT_NAME} PRIVATE ${AWSSDK_LINK_LIBRARIES} aws-lambda-runtime) + add_executable(unit_tests + unit/no_op_test.cpp) + target_link_libraries(unit_tests PRIVATE gtest_main aws-lambda-runtime) -include(GoogleTest) -gtest_discover_tests(${PROJECT_NAME} EXTRA_ARGS "--aws_prefix=${TEST_RESOURCE_PREFIX}") # requires CMake 3.10 or later + # Register unit tests + include(GoogleTest) + gtest_discover_tests(unit_tests + PROPERTIES + LABELS "unit" + DISCOVERY_TIMEOUT 10) +else() + message(STATUS "Unit tests skipped: Not in GitHub Actions environment") +endif() -add_subdirectory(resources) + +find_package(AWSSDK COMPONENTS lambda iam QUIET) + +if(AWSSDK_FOUND) + add_executable(${PROJECT_NAME} + integration/main.cpp + integration/runtime_tests.cpp + integration/version_tests.cpp + gtest/gtest-all.cc) + + target_link_libraries(${PROJECT_NAME} PRIVATE ${AWSSDK_LINK_LIBRARIES} aws-lambda-runtime) + + include(GoogleTest) + gtest_discover_tests(${PROJECT_NAME} EXTRA_ARGS "--aws_prefix=${TEST_RESOURCE_PREFIX}") + + add_subdirectory(resources) +else() + message(STATUS "Integration tests skipped: AWS SDK not found or not in GitHub Actions environment") +endif() diff --git a/tests/gtest/gtest.h b/tests/gtest/gtest.h index 844c9b7..deeb98d 100644 --- a/tests/gtest/gtest.h +++ b/tests/gtest/gtest.h @@ -1,3 +1,4 @@ +// clang-format off // Copyright 2005, Google Inc. 
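The json_escape change above swaps sprintf for a bounds-checked snprintf when printing control characters as \u escapes. The same step in isolation (buffer size and format assumed to match the patched code):

#include <array>
#include <cstdio>
#include <string>

// Escape a control character as \u00XX using a fixed, bounds-checked buffer.
std::string escape_control_char(unsigned char ch)
{
    constexpr int printed_unicode_length = 6; // 'u' + 4 hex digits + NUL
    std::array<char, printed_unicode_length> buf{};
    std::snprintf(buf.data(), buf.size(), "u%04x", ch);
    return "\\" + std::string(buf.data(), buf.size() - 1); // drop the NUL, keep "u00xx"
}

int main()
{
    std::printf("%s\n", escape_control_char('\n').c_str()); // prints \u000a
    return 0;
}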
// All rights reserved. // diff --git a/tests/main.cpp b/tests/integration/main.cpp similarity index 97% rename from tests/main.cpp rename to tests/integration/main.cpp index d7700e8..2a112d3 100644 --- a/tests/main.cpp +++ b/tests/integration/main.cpp @@ -1,6 +1,6 @@ #include #include -#include "gtest/gtest.h" +#include "../gtest/gtest.h" std::function()> get_console_logger_factory() { diff --git a/tests/runtime_tests.cpp b/tests/integration/runtime_tests.cpp similarity index 74% rename from tests/runtime_tests.cpp rename to tests/integration/runtime_tests.cpp index 0032429..843f815 100644 --- a/tests/runtime_tests.cpp +++ b/tests/integration/runtime_tests.cpp @@ -1,4 +1,6 @@ +#include #include +#include #include #include #include @@ -9,9 +11,16 @@ #include #include #include -#include #include -#include "gtest/gtest.h" +#include +#include "../gtest/gtest.h" +#include +#include +#include +#include +#include +#include +#include extern std::string aws_prefix; @@ -19,8 +28,7 @@ namespace { using namespace Aws::Lambda; -constexpr auto S3BUCKET = "aws-lambda-cpp-tests"; -constexpr auto S3KEY = "lambda-test-fun.zip"; +constexpr auto ZIP_FILE_PATH = "resources/lambda-test-fun.zip"; constexpr auto REQUEST_TIMEOUT = 15 * 1000; struct LambdaRuntimeTest : public ::testing::Test { @@ -39,6 +47,9 @@ struct LambdaRuntimeTest : public ::testing::Test { Aws::Client::ClientConfiguration config; config.requestTimeoutMs = REQUEST_TIMEOUT; config.region = Aws::Environment::GetEnv("AWS_REGION"); + if (config.region.empty()) { + throw std::invalid_argument("environment variable AWS_REGION not set"); + } return config; } @@ -55,6 +66,7 @@ struct LambdaRuntimeTest : public ::testing::Test { delete_function(build_resource_name("echo_success"), false /*assert*/); delete_function(build_resource_name("echo_failure"), false /*assert*/); delete_function(build_resource_name("binary_response"), false /*assert*/); + delete_function(build_resource_name("crash_backtrace"), false /*assert*/); } Aws::String get_role_arn(Aws::String const& role_name) @@ -77,14 +89,36 @@ struct LambdaRuntimeTest : public ::testing::Test { create_function_request.SetHandler(handler_name); create_function_request.SetFunctionName(function_name); // I ran into eventual-consistency issues when creating the role dynamically as part of the test. 
- create_function_request.SetRole(get_role_arn("integration-tests")); + auto exec_role = Aws::Environment::GetEnv("LAMBDA_TEST_ROLE"); + if (exec_role.empty()) { + exec_role = "integration-tests"; + } + create_function_request.SetRole(get_role_arn(exec_role)); + + struct stat s; + auto rc = stat(ZIP_FILE_PATH, &s); + ASSERT_EQ(rc, 0) << std::string("file does not exist: ") + ZIP_FILE_PATH; + Aws::Utils::CryptoBuffer zip_file_bytes(s.st_size); + auto* zip_file = fopen(ZIP_FILE_PATH, "r"); + fread(zip_file_bytes.GetUnderlyingData(), sizeof(unsigned char), s.st_size, zip_file); + fclose(zip_file); + Model::FunctionCode funcode; - funcode.WithS3Bucket(S3BUCKET).WithS3Key(build_resource_name(S3KEY)); - create_function_request.SetCode(funcode); - create_function_request.SetRuntime(Aws::Lambda::Model::Runtime::provided); + funcode.SetZipFile(std::move(zip_file_bytes)); + create_function_request.SetCode(std::move(funcode)); + create_function_request.SetRuntime(Aws::Lambda::Model::Runtime::provided_al2); + + std::vector lambda_architectures = {Aws::Lambda::Model::Architecture::x86_64}; +#ifdef __aarch64__ + lambda_architectures[0] = Aws::Lambda::Model::Architecture::arm64; +#endif + create_function_request.SetArchitectures(lambda_architectures); auto outcome = m_lambda_client.CreateFunction(create_function_request); ASSERT_TRUE(outcome.IsSuccess()) << "Failed to create function " << function_name; + + // work around Lambda function pending creation state + sleep(5); } void delete_function(Aws::String const& function_name, bool assert = true) @@ -191,4 +225,25 @@ TEST_F(LambdaRuntimeTest, binary_response) EXPECT_EQ(expected_length, invoke_outcome.GetResult().GetPayload().tellp()); delete_function(funcname); } + +TEST_F(LambdaRuntimeTest, crash) +{ + Aws::String const funcname = build_resource_name("crash_backtrace"); + create_function(funcname, "crash_backtrace" /*handler_name*/); + Model::InvokeRequest invoke_request; + invoke_request.SetFunctionName(funcname); + invoke_request.SetInvocationType(Model::InvocationType::RequestResponse); + invoke_request.SetLogType(Model::LogType::Tail); + + Model::InvokeOutcome invoke_outcome = m_lambda_client.Invoke(invoke_request); + EXPECT_TRUE(invoke_outcome.IsSuccess()); + EXPECT_EQ(200, invoke_outcome.GetResult().GetStatusCode()); + EXPECT_STREQ("Unhandled", invoke_outcome.GetResult().GetFunctionError().c_str()); + Aws::Utils::Base64::Base64 base64; + auto decoded = base64.Decode(invoke_outcome.GetResult().GetLogResult()); + std::string tail_logs(reinterpret_cast(decoded.GetUnderlyingData()), decoded.GetLength()); + EXPECT_NE(tail_logs.find("Stack trace (most recent call last):"), std::string::npos); + delete_function(funcname); +} + } // namespace diff --git a/tests/version_tests.cpp b/tests/integration/version_tests.cpp similarity index 87% rename from tests/version_tests.cpp rename to tests/integration/version_tests.cpp index 862c680..a0f546e 100644 --- a/tests/version_tests.cpp +++ b/tests/integration/version_tests.cpp @@ -1,5 +1,5 @@ #include -#include "gtest/gtest.h" +#include "../gtest/gtest.h" using namespace aws::lambda_runtime; @@ -12,7 +12,7 @@ TEST(VersionTests, get_version_major) TEST(VersionTests, get_version_minor) { auto version = get_version_minor(); - ASSERT_GE(version, 1); + ASSERT_GE(version, 0); } TEST(VersionTests, get_version_patch) diff --git a/tests/resources/lambda_function.cpp b/tests/resources/lambda_function.cpp index 43f3dac..bf12fc0 100644 --- a/tests/resources/lambda_function.cpp +++ b/tests/resources/lambda_function.cpp @@ -1,6 
+1,7 @@ #include #include +#include #include #include #include @@ -22,8 +23,14 @@ invocation_response echo_failure(invocation_request const& /*request*/) invocation_response binary_response(invocation_request const& /*request*/) { - const std::string png((char*)awslogo_png, AWSLOGO_PNG_LEN); - return invocation_response::success(png, "image/png"); + std::string png((char*)awslogo_png, AWSLOGO_PNG_LEN); + return invocation_response::success(std::move(png), "image/png"); +} + +invocation_response crash_backtrace(invocation_request const& /*request*/) +{ + throw std::runtime_error("barf"); + return invocation_response::failure("unreachable", "unreachable"); } int main(int argc, char* argv[]) @@ -32,13 +39,11 @@ int main(int argc, char* argv[]) handlers.emplace("echo_success", echo_success); handlers.emplace("echo_failure", echo_failure); handlers.emplace("binary_response", binary_response); + handlers.emplace("crash_backtrace", crash_backtrace); - if (argc < 2) { - aws::logging::log_error("lambda_fun", "Missing handler argument. Exiting."); - return -1; - } - - auto it = handlers.find(argv[1]); + // Read the handler from the environment variable + const char* handler_name = std::getenv("_HANDLER"); + auto it = handlers.find(handler_name == nullptr ? "" : handler_name); if (it == handlers.end()) { aws::logging::log_error("lambda_fun", "Handler %s not found. Exiting.", argv[1]); return -2; diff --git a/tests/unit/no_op_test.cpp b/tests/unit/no_op_test.cpp new file mode 100644 index 0000000..c9a3b7d --- /dev/null +++ b/tests/unit/no_op_test.cpp @@ -0,0 +1,6 @@ +#include + +TEST(noop, dummy_test) +{ + ASSERT_EQ(0, 0); +}
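The new unit_tests target currently holds only the no-op test above. Anything in the runtime's public headers can be exercised the same way without AWS access; a hypothetical next test (not part of this patch), assuming the usual <gtest/gtest.h> include:

#include <gtest/gtest.h>
#include <aws/lambda-runtime/runtime.h>

// Hypothetical follow-on unit test: invocation_response is plain C++ and needs no AWS calls.
TEST(invocation_response, success_carries_payload_and_content_type)
{
    auto r = aws::lambda_runtime::invocation_response::success("{\"ok\":true}", "application/json");
    EXPECT_TRUE(r.is_success());
    EXPECT_EQ("{\"ok\":true}", r.get_payload());
    EXPECT_EQ("application/json", r.get_content_type());
}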