0
votes

node.js code below

I created a bucket in the 'ap-south-1' region. The bucket is public; you can view an object at: https://iamgroot007.s3.ap-south-1.amazonaws.com/deadpool.png

// AWS SDK v2 and Node's filesystem module.
const AWS = require("aws-sdk");
const fs = require("fs");
// S3 target configuration, read from environment variables.
const BUCKET = process.env.BUCKET;
const REGION = process.env.REGION;
const ACCESS_KEY = process.env.ACCESS_KEY_ID;
const SECRET_KEY = process.env.SECRET_ACCESS_KEY;
// Local file to upload and the S3 object key to store it under.
const localImage = "./ap.png";
// NOTE(review): computed once at module load, so every upload during the
// process lifetime reuses the same key — consider moving this into the
// route handler so each request gets a fresh key.
const imageRemoteName = `catImage_${new Date().getTime()}.png`;

/**
 * POST /image-upload — uploads the local image file to S3 and replies with
 * a pre-signed GET URL for the stored object.
 *
 * Fixes over the original:
 *  - the handler was missing its closing `});` (syntax error);
 *  - the object key is generated per request instead of once at module
 *    load, so repeated uploads no longer overwrite the same object;
 *  - the handler now sends an HTTP response on both success and failure
 *    (the original only logged, leaving the client hanging).
 */
router.post("/image-upload", (req, res) => {
  AWS.config.update({
    accessKeyId: ACCESS_KEY,
    secretAccessKey: SECRET_KEY,
    region: REGION,
  });

  const s3 = new AWS.S3();

  // Fresh key per request; the module-level constant was computed once.
  const remoteName = `catImage_${Date.now()}.png`;

  s3.putObject({
    Bucket: BUCKET,
    Body: fs.readFileSync(localImage),
    Key: remoteName,
  })
    .promise()
    .then((response) => {
      console.log(`done! - `, response);
      // getSignedUrl with "getObject" returns a time-limited read URL.
      const url = s3.getSignedUrl("getObject", {
        Bucket: BUCKET,
        Key: remoteName,
      });
      console.log(`The URL is ${url}`);
      res.status(200).send({ url });
    })
    .catch((err) => {
      console.log("failed:", err);
      res.status(500).send(err);
    });
});

I am getting this error:

message: 'Inaccessible host: s3.ap-south-1\'. This service may not be available in the ap-south-1,' region.', code: 'UnknownEndpoint', region: 'ap-south-1,', hostname: 's3.ap-south-1', retryable: true, originalError: { Error: getaddrinfo ENOTFOUND s3.ap-south-1 s3.ap-south-1:443 at GetAddrInfoReqWrap.onlookup [as oncomplete] (dns.js:56:26) message: 'getaddrinfo ENOTFOUND s3.ap-south-1 s3.ap-south-1:443', errno: 'ENOTFOUND', code: 'NetworkingError', syscall: 'getaddrinfo', hostname: 's3.ap-south-1', host: 's3.ap-south-1', port: 443, region: 'ap-south-1,', retryable: true, time: 2020-09-11T19:08:30.062Z }, time: 2020-09-11T19:08:30.062Z }

1
Does the hostname in the error message (`s3.ap-south-1\`) literally have that trailing slash or is that a copy/paste error of some sort on your part? Have you verified, by logging, that your BUCKET and REGION environment variable values are actually correct? - jarmod
Yes my bucket and region name is correct and the error is been pasted as it is - Saif Farooqui
Unrelated to your connectivity issue, the way you are initializing imageRemoteName is wrong. You should move it into the function handler. The way you have written it, multiple uploads may be written to the same cat file if the Lambda function is warm when invoked (in which case the initialization code outside of the function handler is not re-run). - jarmod
Thanks for the suggestion. I will update it as you said. - Saif Farooqui
Actually maybe this is not even running on Lambda, but the comment remains that you should init imageRemoteName in the upload route handler. Can you run the awscli on the same system? Maybe test the awscli uploading an object to that same ap-south-1 bucket using the same BUCKET and REGION environment variables in the command line. - jarmod

1 Answer

0
votes

There were 2 mistakes: 1) In the .env file I had a trailing ',' after the secret key, access key, and bucket name values (which is why the region string became 'ap-south-1,' and produced the UnknownEndpoint error). 2) I did not use the multer middleware to accept the incoming FormData.

The working code for uploading an image is:

// S3 client configured once with credentials from the environment
// (the region can be supplied here or via the AWS_REGION variable).
const s3 = new AWS.S3({
  accessKeyId: ACCESS_KEY,
  secretAccessKey: SECRET_KEY,
});

// Keep the upload in memory so the file bytes are available on
// `req.file.buffer`. FIX: memoryStorage() takes no options — the
// `destination` callback in the original belongs to diskStorage and was
// silently ignored, so it has been removed.
const storage = multer.memoryStorage();

// Middleware accepting a single file from the multipart field "image".
const upload = multer({ storage }).single("image");

/**
 * POST /image-upload — receives one multipart file (field "image") via the
 * multer middleware and uploads it to S3 under `shopLogo/<uuid>.<ext>`.
 *
 * Fixes over the original:
 *  - replies 400 when no file was attached (previously `req.file` would be
 *    undefined and `originalname` would throw a TypeError);
 *  - `return`s after the 500 error response so a failed upload no longer
 *    attempts a second `res.send` (ERR_HTTP_HEADERS_SENT).
 */
router.post("/image-upload", upload, (req, res) => {
  if (!req.file) {
    return res.status(400).send({ message: "No image file provided" });
  }

  // Preserve the original file extension in the generated object key.
  const nameParts = req.file.originalname.split(".");
  const extension = nameParts[nameParts.length - 1];
  const Key = `shopLogo/${uuidv4()}.${extension}`;
  console.log(nameParts, extension, Key, BUCKET);

  const params = {
    Bucket: BUCKET,
    Key,
    Body: req.file.buffer,
  };

  s3.upload(params, (err, data) => {
    if (err) {
      // Stop here — otherwise the success send below runs too.
      return res.status(500).send(err);
    }
    res.status(200).send(data);
  });
});