Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/lint.yml
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ on:
workflow_dispatch:

env:
GOLANGCI_LINT_VERSION: v2.1.6
GOLANGCI_LINT_VERSION: v2.7.2
GOLANGCI_LINT_TIMEOUT: 10m

concurrency:
Expand Down
4 changes: 0 additions & 4 deletions .golangci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -10,10 +10,6 @@ linters:
- prealloc
- unconvert
- unparam
settings:
errcheck:
exclude-functions:
- (*go.uber.org/zap.Logger).Sync
exclusions:
generated: lax
presets:
Expand Down
53 changes: 26 additions & 27 deletions aws-kinesis-http-connector/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,9 @@ import (
"sync"
"time"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/kinesis"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/kinesis"
"github.com/aws/aws-sdk-go-v2/service/kinesis/types"

"github.com/fission/keda-connectors/common"

Expand All @@ -20,23 +21,23 @@ import (

type pullFunc func(*record) error
type record struct {
*kinesis.Record
*types.Record
shardID string
millisBehindLatest *int64
}
type awsKinesisConnector struct {
ctx context.Context
client *kinesis.Kinesis
client *kinesis.Client
connectordata common.ConnectorMetadata
logger *zap.Logger
shardc chan *kinesis.Shard
maxRecords int64
shardc chan *types.Shard
maxRecords int32
}

// listShards get called every 30sec to get all the shards
func (conn *awsKinesisConnector) listShards() ([]*kinesis.Shard, error) {
func (conn *awsKinesisConnector) listShards() ([]types.Shard, error) {
// call DescribeStream to get updated shards
stream, err := conn.client.DescribeStream(&kinesis.DescribeStreamInput{
stream, err := conn.client.DescribeStream(context.TODO(), &kinesis.DescribeStreamInput{
StreamName: &conn.connectordata.Topic,
})
if err != nil {
Expand Down Expand Up @@ -65,7 +66,7 @@ func (conn *awsKinesisConnector) findNewShards() {
// send only new shards
_, loaded := shards.LoadOrStore(*s.ShardId, s)
if !loaded {
conn.shardc <- s
conn.shardc <- &s
}
}
}
Expand All @@ -82,16 +83,16 @@ func (conn *awsKinesisConnector) getIterator(shardID string, checkpoint string)
if checkpoint != "" {
// Start from, where we left
params.StartingSequenceNumber = aws.String(checkpoint)
params.ShardIteratorType = aws.String(kinesis.ShardIteratorTypeAfterSequenceNumber)
iteratorOutput, err := conn.client.GetShardIteratorWithContext(conn.ctx, params)
params.ShardIteratorType = types.ShardIteratorTypeAfterSequenceNumber
iteratorOutput, err := conn.client.GetShardIterator(conn.ctx, params)
if err != nil {
return nil, err
}
return iteratorOutput, err
}
// Start from, oldest record in the shard
params.ShardIteratorType = aws.String(kinesis.ShardIteratorTypeTrimHorizon)
iteratorOutput, err := conn.client.GetShardIteratorWithContext(conn.ctx, params)
params.ShardIteratorType = types.ShardIteratorTypeTrimHorizon
iteratorOutput, err := conn.client.GetShardIterator(conn.ctx, params)
if err != nil {
return nil, err
}
Expand All @@ -101,7 +102,7 @@ func (conn *awsKinesisConnector) getIterator(shardID string, checkpoint string)
// getRecords get the data for the specific shard
func (conn *awsKinesisConnector) getRecords(shardIterator *string) (*kinesis.GetRecordsOutput, error) {
// get records use shard iterator for making request
records, err := conn.client.GetRecords(&kinesis.GetRecordsInput{
records, err := conn.client.GetRecords(context.TODO(), &kinesis.GetRecordsInput{
ShardIterator: shardIterator,
Limit: &conn.maxRecords,
})
Expand Down Expand Up @@ -158,7 +159,7 @@ func (conn *awsKinesisConnector) pullRecords(fn pullFunc) {

for _, r := range resp.Records {
// send records
err := fn(&record{r, shardID, resp.MillisBehindLatest})
err := fn(&record{&r, shardID, resp.MillisBehindLatest})
checkpoints.Store(shardID, *r.SequenceNumber)
if err != nil {
conn.logger.Error("error in processing records",
Expand Down Expand Up @@ -233,7 +234,7 @@ func (conn *awsKinesisConnector) responseHandler(r *record, response string) err
StreamName: aws.String(conn.connectordata.ResponseTopic), // Required
SequenceNumberForOrdering: aws.String(*r.SequenceNumber),
}
_, err := conn.client.PutRecord(params)
_, err := conn.client.PutRecord(context.TODO(), params)
if err != nil {
return err
}
Expand All @@ -250,7 +251,7 @@ func (conn *awsKinesisConnector) errorHandler(r *record, errMsg string) {
SequenceNumberForOrdering: aws.String(*r.SequenceNumber),
}

_, err := conn.client.PutRecord(params)
_, err := conn.client.PutRecord(context.TODO(), params)
if err != nil {
conn.logger.Error("failed to publish message to error topic",
zap.Error(err),
Expand All @@ -272,29 +273,27 @@ func main() {
if err != nil {
log.Fatalf("can't initialize zap logger: %v", err)
}
defer logger.Sync()
defer func() {
_ = logger.Sync()
}()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

config, err := common.GetAwsConfig()
config, err := common.GetAwsConfig(ctx)
if err != nil {
logger.Error("failed to fetch aws config", zap.Error(err))
return
}

s, err := common.CreateValidatedSession(config)
if err != nil {
logger.Error("not able to create the session", zap.Error(err))
return
}
kc := kinesis.New(s)
kc := kinesis.NewFromConfig(config)
connectordata, err := common.ParseConnectorMetadata()
if err != nil {
logger.Error("error while parsing metadata", zap.Error(err))
return
}
if err := kc.WaitUntilStreamExists(&kinesis.DescribeStreamInput{StreamName: &connectordata.Topic}); err != nil {
waiter := kinesis.NewStreamExistsWaiter(kc)
if err := waiter.Wait(context.TODO(), &kinesis.DescribeStreamInput{StreamName: &connectordata.Topic}, 5*time.Minute); err != nil {
logger.Error("not able to connect to kinesis stream", zap.Error(err))
return
}
Expand All @@ -306,7 +305,7 @@ func main() {
cancel() // call cancellation
}()

shardc := make(chan *kinesis.Shard, 1)
shardc := make(chan *types.Shard, 1)

conn := awsKinesisConnector{
ctx: ctx,
Expand Down
2 changes: 1 addition & 1 deletion aws-kinesis-http-connector/version
Original file line number Diff line number Diff line change
@@ -1 +1 @@
v0.18
v0.19
50 changes: 24 additions & 26 deletions aws-sqs-http-connector/main.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package main

import (
"context"
"io"
"log"
"net/url"
Expand All @@ -11,15 +12,16 @@ import (

"go.uber.org/zap"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/sqs"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/sqs"
"github.com/aws/aws-sdk-go-v2/service/sqs/types"

"github.com/fission/keda-connectors/common"
)

type awsSQSConnector struct {
sqsURL *url.URL
sqsClient *sqs.SQS
sqsClient *sqs.Client
connectordata common.ConnectorMetadata
logger *zap.Logger
}
Expand All @@ -34,8 +36,8 @@ func parseURL(baseURL *url.URL, queueName string) (string, error) {
}

func (conn awsSQSConnector) consumeMessage() {
var maxNumberOfMessages = int64(10) // Process maximum 10 messages concurrently
var waitTimeSeconds = int64(5) // Wait 5 sec to process another message
var maxNumberOfMessages = int32(10) // Process maximum 10 messages concurrently
var waitTimeSeconds = int32(5) // Wait 5 sec to process another message
var respQueueURL, errorQueueURL string
headers := http.Header{
"KEDA-Topic": {conn.connectordata.Topic},
Expand Down Expand Up @@ -67,10 +69,10 @@ func (conn awsSQSConnector) consumeMessage() {
conn.logger.Info("starting to consume messages from queue", zap.String("queue", consQueueURL), zap.String("response queue", respQueueURL), zap.String("error queue", errorQueueURL))

for {
output, err := conn.sqsClient.ReceiveMessage(&sqs.ReceiveMessageInput{
output, err := conn.sqsClient.ReceiveMessage(context.TODO(), &sqs.ReceiveMessageInput{
QueueUrl: &consQueueURL,
MaxNumberOfMessages: &maxNumberOfMessages,
WaitTimeSeconds: &waitTimeSeconds,
MaxNumberOfMessages: maxNumberOfMessages,
WaitTimeSeconds: waitTimeSeconds,
})

if err != nil {
Expand All @@ -80,7 +82,7 @@ func (conn awsSQSConnector) consumeMessage() {
for _, message := range output.Messages {
// Set the attributes as message header came from SQS record
for k, v := range message.Attributes {
headers.Add(k, *v)
headers.Add(k, v)
}

resp, err := common.HandleHTTPRequest(*message.Body, headers, conn.connectordata, conn.logger)
Expand All @@ -92,10 +94,10 @@ func (conn awsSQSConnector) consumeMessage() {
conn.errorHandler(errorQueueURL, err)
} else {
// Generating SQS Message attribute
var sqsMessageAttValue = make(map[string]*sqs.MessageAttributeValue)
var sqsMessageAttValue = make(map[string]types.MessageAttributeValue)
for k, v := range resp.Header {
for _, d := range v {
sqsMessageAttValue[k] = &sqs.MessageAttributeValue{
sqsMessageAttValue[k] = types.MessageAttributeValue{
DataType: aws.String("String"),
StringValue: aws.String(d),
}
Expand All @@ -114,10 +116,10 @@ func (conn awsSQSConnector) consumeMessage() {
}
}

func (conn awsSQSConnector) responseHandler(queueURL string, response string, messageAttValue map[string]*sqs.MessageAttributeValue) bool {
func (conn awsSQSConnector) responseHandler(queueURL string, response string, messageAttValue map[string]types.MessageAttributeValue) bool {
if queueURL != "" {
_, err := conn.sqsClient.SendMessage(&sqs.SendMessageInput{
DelaySeconds: aws.Int64(10),
_, err := conn.sqsClient.SendMessage(context.TODO(), &sqs.SendMessageInput{
DelaySeconds: int32(10),
MessageAttributes: messageAttValue,
MessageBody: &response,
QueueUrl: &queueURL,
Expand All @@ -140,8 +142,8 @@ func (conn awsSQSConnector) responseHandler(queueURL string, response string, me
func (conn *awsSQSConnector) errorHandler(queueURL string, err error) {
if queueURL != "" {
errMsg := err.Error()
_, err := conn.sqsClient.SendMessage(&sqs.SendMessageInput{
DelaySeconds: aws.Int64(10),
_, err := conn.sqsClient.SendMessage(context.TODO(), &sqs.SendMessageInput{
DelaySeconds: int32(10),
//MessageAttributes: messageAttValue,
MessageBody: &errMsg,
QueueUrl: &queueURL,
Expand All @@ -163,7 +165,7 @@ func (conn *awsSQSConnector) errorHandler(queueURL string, err error) {
}

func (conn *awsSQSConnector) deleteMessage(id string, queueURL string) {
_, err := conn.sqsClient.DeleteMessage(&sqs.DeleteMessageInput{
_, err := conn.sqsClient.DeleteMessage(context.TODO(), &sqs.DeleteMessageInput{
QueueUrl: &queueURL,
ReceiptHandle: &id,
})
Expand All @@ -181,24 +183,20 @@ func main() {
if err != nil {
log.Fatalf("can't initialize zap logger: %v", err)
}
defer logger.Sync()
defer func() {
_ = logger.Sync()
}()

connectordata, err := common.ParseConnectorMetadata()
if err != nil {
logger.Fatal("failed to parse connector metadata", zap.Error(err))
}
config, err := common.GetAwsConfig()
config, err := common.GetAwsConfig(context.TODO())
if err != nil {
logger.Error("failed to fetch aws config", zap.Error(err))
return
}

sess, err := common.CreateValidatedSession(config)
if err != nil {
logger.Error("not able create session using aws configuration", zap.Error(err))
return
}
svc := sqs.New(sess)
svc := sqs.NewFromConfig(config)

sqsURL, err := url.Parse(strings.TrimSuffix(os.Getenv("QUEUE_URL"), os.Getenv("TOPIC")))
if err != nil {
Expand Down
40 changes: 15 additions & 25 deletions aws-sqs-http-connector/test/validate/main.go
Original file line number Diff line number Diff line change
@@ -1,14 +1,16 @@
package main

import (
"context"
"fmt"
"log"
"os"
"time"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sqs"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/sqs"

"github.com/fission/keda-connectors/common"
)

// comment
Expand All @@ -17,30 +19,18 @@ func main() {
if queueURL == "" {
log.Fatal("AWS_SQS_URL is not set")
}
region := os.Getenv("AWS_REGION")
if region == "" {
log.Fatal("AWS_REGION is not set")
}
endpoint := os.Getenv("AWS_ENDPOINT")
if endpoint == "" {
log.Fatal("AWS_ENDPOINT is not set")
}
config := &aws.Config{
Region: &region,
Endpoint: &endpoint,
}

sess, err := session.NewSession(config)
config, err := common.GetAwsConfig(context.TODO())
if err != nil {
log.Fatal("Error while creating session", err)
log.Fatal("Error while getting AWS config", err)
}
svc := sqs.New(sess)
svc := sqs.NewFromConfig(config)

msg := "Hello Msg"
url := queueURL + "/my_queue"
log.Println("Sending message to queue", url)
_, err = svc.SendMessage(&sqs.SendMessageInput{
DelaySeconds: aws.Int64(10),
_, err = svc.SendMessage(context.TODO(), &sqs.SendMessageInput{
DelaySeconds: *aws.Int32(10),
Copy link

Copilot AI Jan 1, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Incorrect use of pointer dereference operator. The expression 'aws.Int32(10)' attempts to dereference the result of 'aws.Int32(10)', which returns a pointer. The field 'DelaySeconds' expects an 'int32', not a dereferenced pointer. Remove the '' operator and use 'aws.Int32(10)' directly, or use '10' as an int32 literal.

Suggested change
DelaySeconds: *aws.Int32(10),
DelaySeconds: 10,

Copilot uses AI. Check for mistakes.
MessageBody: &msg,
QueueUrl: &url,
})
Expand All @@ -50,11 +40,11 @@ func main() {
time.Sleep(5 * time.Second)
urlRep := queueURL + "/responseTopic"
log.Println("Receiving message from queue", urlRep)
var maxNumberOfMessages = int64(1)
var waitTimeSeconds = int64(5)
output, err := svc.ReceiveMessage(&sqs.ReceiveMessageInput{
MaxNumberOfMessages: &maxNumberOfMessages,
WaitTimeSeconds: &waitTimeSeconds,
var maxNumberOfMessages = int32(1)
var waitTimeSeconds = int32(5)
output, err := svc.ReceiveMessage(context.TODO(), &sqs.ReceiveMessageInput{
MaxNumberOfMessages: maxNumberOfMessages,
WaitTimeSeconds: waitTimeSeconds,
QueueUrl: &urlRep,
})
if err != nil {
Expand Down
2 changes: 1 addition & 1 deletion aws-sqs-http-connector/version
Original file line number Diff line number Diff line change
@@ -1 +1 @@
v0.19
v0.20
Loading
Loading