3
votes

I simply cannot get my Lambda function to connect to and create mappings on my AWS Elasticsearch server. I am using Node and the elasticsearch package, and I receive the following error (in CloudWatch logs):

ERROR   Uncaught Exception
{
    "errorType": "Error [ERR_UNHANDLED_ERROR]",
    "errorMessage": "Unhandled error. ({ message: 'Unable to create Elasticsearch mapping for Log',\n  data:\n   'Error: Authorization Exception\\n    at respond (/var/task/node_modules/elasticsearch/src/lib/transport.js:308:15)\\n    at checkRespForFailure (/var/task/node_modules/elasticsearch/src/lib/transport.js:267:7)\\n    at done (/var/task/node_modules/http-aws-es/connector.js:48:7)\\n    at IncomingMessage.cleanUp (/var/task/node_modules/http-aws-es/src/node.js:20:7)\\n    at IncomingMessage.emit (events.js:203:15)\\n    at IncomingMessage.EventEmitter.emit (domain.js:448:20)\\n    at endReadableNT (_stream_readable.js:1129:12)\\n    at process._tickCallback (internal/process/next_tick.js:63:19)' })",
    "code": "ERR_UNHANDLED_ERROR",
    "stack": [
        "Error [ERR_UNHANDLED_ERROR]: Unhandled error. ({ message: 'Unable to create Elasticsearch mapping for Log',",
        "  data:",
        "   'Error: Authorization Exception\\n    at respond (/var/task/node_modules/elasticsearch/src/lib/transport.js:308:15)\\n    at checkRespForFailure (/var/task/node_modules/elasticsearch/src/lib/transport.js:267:7)\\n    at done (/var/task/node_modules/http-aws-es/connector.js:48:7)\\n    at IncomingMessage.cleanUp (/var/task/node_modules/http-aws-es/src/node.js:20:7)\\n    at IncomingMessage.emit (events.js:203:15)\\n    at IncomingMessage.EventEmitter.emit (domain.js:448:20)\\n    at endReadableNT (_stream_readable.js:1129:12)\\n    at process._tickCallback (internal/process/next_tick.js:63:19)' })",
        "    at Object.emit (events.js:187:17)",
        "    at limiter.removeTokens (/var/task/node_modules/@my-company/my-package/lib/logger.js:158:10)",
        "    at afterTokensRemoved (/var/task/node_modules/limiter/lib/rateLimiter.js:87:7)",
        "    at process._tickCallback (internal/process/next_tick.js:61:11)"
    ],
    "context": {
        "message": "Unable to create Elasticsearch mapping for Log",
        "data": "Error: Authorization Exception\n    at respond (/var/task/node_modules/elasticsearch/src/lib/transport.js:308:15)\n    at checkRespForFailure (/var/task/node_modules/elasticsearch/src/lib/transport.js:267:7)\n    at done (/var/task/node_modules/http-aws-es/connector.js:48:7)\n    at IncomingMessage.cleanUp (/var/task/node_modules/http-aws-es/src/node.js:20:7)\n    at IncomingMessage.emit (events.js:203:15)\n    at IncomingMessage.EventEmitter.emit (domain.js:448:20)\n    at endReadableNT (_stream_readable.js:1129:12)\n    at process._tickCallback (internal/process/next_tick.js:63:19)"
    }
}

It seems the Lambda is able to connect to the Elasticsearch server, but it receives an Authorization Exception error (a 403, I believe).

I am using the Serverless framework. My Serverless config looks like this:

# Serverless Framework configuration: AWS provider on the Node.js 10.x runtime.
provider:
  name: aws
  runtime: nodejs10.x
  # Stage defaults to 'dev' when no --stage CLI option is supplied.
  stage: ${opt:stage, 'dev'}
  region: us-east-2
  memorySize: 512
  timeout: 30
  # VPC placement for the function; IDs are resolved from SSM Parameter Store
  # at deploy time.
  vpc:
    securityGroupIds:
      - ${ssm:/my-company/security-group/lambda}
    subnetIds:
      - ${ssm:/my-company/subnet/lambda1/id}
      - ${ssm:/my-company/subnet/lambda2/id}
      - ${ssm:/my-company/subnet/lambda3/id}

functions:
  myFunctionName:
    handler: src/my-function-name.handler
    events:
      # Triggered by messages on a stage-specific SQS queue (ARN from SSM).
      - sqs:
          arn: ${ssm:/my-company/${self:provider.stage}/update-print-image-status-queue-arn}

# Paths excluded from the deployment artifact.
package:
  exclude:
    - .circleci/**
    - .terraform/**
    - test/**

The IAM role used by the Lambda has full Elasticsearch access:

{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "VisualEditor0",
            "Effect": "Allow",
            "Action": [
                "sqs:DeleteMessage",
                "logs:CreateLogStream",
                "sqs:ReceiveMessage",
                "sqs:GetQueueAttributes"
            ],
            "Resource": [
                "arn:aws:logs:us-east-2:<AWS account ID>:log-group:/aws/lambda/<project-name>-dev*:*",
                "arn:aws:sqs:us-east-2:<AWS account ID>:update-print-image-status-queue-dev"
            ]
        },
        {
            "Sid": "VisualEditor1",
            "Effect": "Allow",
            "Action": "logs:PutLogEvents",
            "Resource": "arn:aws:logs:us-east-2:<AWS account ID>:log-group:/aws/lambda/<project-name>-dev*:*:*"
        },
        {
            "Sid": "VisualEditor2",
            "Effect": "Allow",
            "Action": "es:*",
            "Resource": "*"
        }
    ]
}

The VPC and subnets definitely exist, and the expected subnets are associated with the Lambda. The Lambda is able to access and query my MongoDB EC2 server with no issues. The Elasticsearch server is in the same VPC and subnet as the MongoDB EC2 server.

Here is how I initialize the ES client from Node:

const AWS = require('aws-sdk');
const elasticsearch = require('elasticsearch');

// NOTE(review): inside Lambda, the injected credentials are *temporary* STS
// credentials and include AWS_SESSION_TOKEN. Signing with only the access
// key and secret produces an invalid signature, which the Elasticsearch
// domain rejects with a 403 ("Authorization Exception"). Pass the session
// token as well — or omit awsConfig entirely and let http-aws-es use the
// default credential provider chain.
const client = elasticsearch.Client({
  host: process.env.ELASTICSEARCH_HOSTS,
  connectionClass: require('http-aws-es'),
  awsConfig: new AWS.Config({
    accessKeyId: process.env.AWS_ACCESS_KEY_ID,
    secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
    sessionToken: process.env.AWS_SESSION_TOKEN, // required for temporary credentials
    region: process.env.ELASTICSEARCH_REGION,
  }),
});

ELASTICSEARCH_HOSTS and ELASTICSEARCH_REGION are both defined as environment variables for the Lambda with correct values.

The client is then used by mongoosastic.

This same code with no changes works fine running on an EC2 server (which is in the same VPC and subnet as the MongoDB EC2 server).

The Elasticsearch service (managed by AWS; version 6.3) has the following policy:

{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "AWS": "*"
      },
      "Action": "es:*",
      "Resource": "arn:aws:es:us-east-2:<aws account ID>:domain/<my domain>/*"
    }
  ]
}

So, I am stumped as to why I am seeing "Authorization Exception". Any ideas?

2

2 Answers

3
votes

I believe I have solved this. The following works:

const AWS = require('aws-sdk');
const elasticsearch = require('elasticsearch');

// Rely on the default AWS credential provider chain: http-aws-es picks up
// the Lambda execution role's temporary credentials (including the session
// token) automatically, so no explicit keys are needed.
const client = elasticsearch.Client({
  host: process.env.ELASTICSEARCH_HOSTS,
  connectionClass: require('http-aws-es')
});

Specifying access keys isn't necessary — in fact, passing only the access key and secret from the Lambda environment omits the session token that Lambda's temporary credentials require, so the default credential chain should be used instead. I also have the security group settings for the Elasticsearch server such that the Lambda security group has ingress access.

0
votes
    # Lambda that pulls merge-request stats from GitLab and indexes a document
    # into an AWS Elasticsearch domain, signing requests with SigV4 (AWS4Auth).
    import gitlab
    import logging
    from elasticsearch import Elasticsearch, RequestsHttpConnection
    from requests_aws4auth import AWS4Auth
    import boto3
    #from aws_requests_auth.aws_auth import AWSRequestsAuth

    LOGGER = logging.getLogger()
    # Elasticsearch endpoint; port 443 because the client connects over HTTPS.
    ES_HOST = {'host':'search-testelasticsearch-xxxxxxxxx.eu-west-2.es.amazonaws.com', 'port': 443}


    def lambda_handler(event, context):
        # NOTE(review): `event` and `context` are unused; the handler performs
        # the same fetch-and-index regardless of the trigger payload.
        LOGGER.info('started')
        # Authenticate against GitLab (URL and token redacted).
        gl = gitlab.Gitlab('xxxxxxx', private_token='xxxxxx')
        group = gl.groups.get('Thunderbird')  # NOTE(review): unused below
        # lazy=True defers fetching project attributes until first access.
        project = gl.projects.get(92, lazy=True)
        mrs = project.mergerequests.list(state='opened', order_by='updated_at')
        first_mr = mrs[0]  # assumes at least one open MR exists — IndexError otherwise
        dump = {
          'title': first_mr.title,
          'state': first_mr.state,
          'name': first_mr.author.get('name'),
          'comments': first_mr.user_notes_count
        }

        # NOTE(review): only `dump2` is indexed below; `dump` is built but never used.
        dump2={
            'number_of_mrs': 9
        }

        service = 'es'
        # Sign with the Lambda role's credentials; session_token is required
        # because they are temporary (STS) credentials.
        credentials = boto3.Session().get_credentials()
        awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, "eu-west-2", service, session_token=credentials.token)
        es = Elasticsearch(hosts=[ES_HOST], http_auth = awsauth, use_ssl = True, verify_certs = True, connection_class = RequestsHttpConnection)
        DAVID_INDEX = 'david_test_index'
        response = es.index(index=DAVID_INDEX, doc_type='is_this_important?', body=dump2, id='4')