package s3

import (
	"compress/gzip"
	"context"
	"encoding/csv"
	"fmt"
	"io"
	"strconv"
	"strings"
	"time"

	"gitee.com/bjf-fhe/apicat/entry"
	"gitee.com/bjf-fhe/apicat/utils"
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/sirupsen/logrus"
)

// Reader streams AWS access-log objects out of an S3 bucket, parses each
// record, and delivers the results on msgChan (created by Records).
type Reader struct {
	tailMode   bool          // when true, keep polling for new objects after each full pass
	interval   time.Duration // sleep between polling passes (tail mode / directory search)
	bucket     string        // bucket name or access-point ARN, as parsed by NewReader
	client     *s3.Client    // configured AWS SDK v2 S3 client
	msgChan    chan *entry.LogEntryResponse // delivery channel; nil until Records is called
	awsLogsDir string        // key prefix under which the log objects live
	from       time.Time // start time for reading: objects last modified before this are skipped
}

// Records starts the background reader goroutine and returns the channel on
// which parsed log entries (and any errors) will be delivered.
func (r *Reader) Records() chan *entry.LogEntryResponse {
	out := make(chan *entry.LogEntryResponse)
	r.msgChan = out
	go r.read()
	return out
}

// SearchAwsLogsDir lists the bucket looking for the AWSLogs directory and
// streams every object found to r.msgChan, polling forever with r.interval
// between passes. Errors are reported on r.msgChan as LogEntryResponse.Error.
//
// NOTE(review): ListObjectsV2 is called with Delimiter (not Prefix) set to the
// directory path, which is unusual — confirm this matches the intended bucket
// layout. Also, ctx is conventionally the first parameter; kept second here to
// preserve the existing call signature.
func (r *Reader) SearchAwsLogsDir(awsLogsDir string, ctx context.Context) {
	logrus.Errorln("确认AWSLogs目录所在位置")
	var continueToken *string
	root := awsLogsDir
	if !strings.HasSuffix(root, "/") {
		root += "/"
	}
	for {
		output, err := r.client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
			Bucket:            &r.bucket,
			ContinuationToken: continueToken,
			Delimiter:         &root,
		})
		if err != nil {
			r.msgChan <- &entry.LogEntryResponse{
				Error: &entry.LogError{
					Error: err,
				},
			}
			break
		}
		// BUG FIX: the token for the next page is NextContinuationToken.
		// output.ContinuationToken only echoes the token we sent, so the old
		// code could never advance past the first page of results.
		if output.IsTruncated {
			continueToken = output.NextContinuationToken
		} else {
			continueToken = nil
		}
		for _, cont := range output.Contents {
			logrus.Info("读取", *cont.Key)

			single, err := r.client.GetObject(ctx, &s3.GetObjectInput{
				Bucket: &r.bucket,
				Key:    cont.Key,
			})
			if err != nil {
				r.msgChan <- &entry.LogEntryResponse{
					Error: &entry.LogError{
						Error: err,
					},
				}
				break
			}
			rc := single.Body
			if strings.HasSuffix(*cont.Key, ".gz") {
				fz, gzErr := gzip.NewReader(single.Body)
				if gzErr != nil {
					// BUG FIX: a gzip failure previously left rc nil, making
					// readLogsFromReader panic and leaking single.Body.
					single.Body.Close()
					r.msgChan <- &entry.LogEntryResponse{
						Error: &entry.LogError{
							Error: gzErr,
						},
					}
					continue
				}
				rc = &utils.ZipReadCloser{
					Zip: fz,
					Low: single.Body,
				}
			}
			// BUG FIX: removed the unconditional break that stopped after the
			// first object of each page; all listed objects are now read.
			r.readLogsFromReader(rc)
		}
		if continueToken != nil {
			continue // more pages pending; fetch them before sleeping
		}
		if len(output.Contents) == 0 {
			logrus.Errorf("Bucket暂无新内容，%v后重试", r.interval)
		} else {
			// Pass complete; wait before re-listing.
			logrus.Errorf("已完成Bucket内容读取，%v后重试", r.interval)
		}
		<-time.After(r.interval)
	}
}

// read lists r.awsLogsDir, downloads every object modified at or after r.from,
// and streams parsed entries to r.msgChan. In tail mode it loops forever,
// advancing the start time to the newest LastModified seen each pass; otherwise
// it performs a single pass and returns. Errors are reported on r.msgChan.
func (r *Reader) read() {
	ctx := context.Background()
	var continueToken *string
	root := r.awsLogsDir
	start := r.from
	newStart := r.from // newest LastModified seen this pass; becomes next pass's cutoff
	for {
		output, err := r.client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
			Bucket:            &r.bucket,
			ContinuationToken: continueToken,
			Prefix:            &root,
			// Delimiter: aws.String("/"),
		})
		if err != nil {
			r.msgChan <- &entry.LogEntryResponse{
				Error: &entry.LogError{
					Error: err,
				},
			}
			break
		}
		// BUG FIX: the next page token is NextContinuationToken;
		// output.ContinuationToken only echoes the request token, so the old
		// code could never advance past the first page.
		if output.IsTruncated {
			continueToken = output.NextContinuationToken
		} else {
			continueToken = nil
		}
		for _, prefix := range output.CommonPrefixes {
			logrus.Debugln(*prefix.Prefix) // was fmt.Println: demote debug output to the logger
		}
		for _, cont := range output.Contents {
			if cont.Size == 0 && strings.HasSuffix(*cont.Key, "/") {
				logrus.Info("跳过文件夹", *cont.Key)
				continue
			}
			if cont.LastModified.Before(start) {
				logrus.Info("跳过老旧文件", *cont.Key)
				continue
			}
			if newStart.Before(*cont.LastModified) {
				newStart = *cont.LastModified
			}
			single, err := r.client.GetObject(ctx, &s3.GetObjectInput{
				Bucket: &r.bucket,
				Key:    cont.Key,
			})
			if err != nil {
				r.msgChan <- &entry.LogEntryResponse{
					Error: &entry.LogError{
						Error: err,
					},
				}
				break
			}
			rc := single.Body
			if strings.HasSuffix(*cont.Key, ".gz") {
				fz, gzErr := gzip.NewReader(single.Body)
				if gzErr != nil {
					// BUG FIX: a gzip failure previously left rc nil, making
					// readLogsFromReader panic and leaking single.Body.
					single.Body.Close()
					r.msgChan <- &entry.LogEntryResponse{
						Error: &entry.LogError{
							Error: gzErr,
						},
					}
					continue
				}
				rc = &utils.ZipReadCloser{
					Zip: fz,
					Low: single.Body,
				}
			}
			// BUG FIX: removed the unconditional break that stopped after the
			// first object of each page; all listed objects are now read.
			r.readLogsFromReader(rc)
		}
		if continueToken != nil {
			continue // more pages pending; fetch them before deciding to sleep/exit
		}
		if len(output.Contents) == 0 {
			logrus.Error("Bucket暂无新内容")
		} else {
			// Pass complete.
			logrus.Error("已完成Bucket内容读取")
		}
		if r.tailMode {
			logrus.Errorf("%v之后重试", r.interval)
			<-time.After(r.interval)
			start = newStart
		} else {
			break
		}
	}
}

// readLogsFromReader parses space-separated (CSV with ' ' comma) log records
// from rc and pushes one LogEntryResponse per record onto r.msgChan.
// rc is always closed before returning. A nil rc is tolerated (no-op).
func (r *Reader) readLogsFromReader(rc io.ReadCloser) {
	if rc == nil {
		// Defensive: a caller whose decompression step failed may hand us nil.
		return
	}
	defer rc.Close()
	reader := csv.NewReader(rc)
	reader.Comma = ' '
	for {
		rec, err := reader.Read()
		if err != nil {
			// io.EOF is the normal end of stream. BUG FIX: any other parse or
			// IO error was previously swallowed silently; report it instead.
			if err != io.EOF {
				r.msgChan <- &entry.LogEntryResponse{
					Error: &entry.LogError{
						Error: err,
					},
				}
			}
			return
		}
		r.msgChan <- &entry.LogEntryResponse{
			Entry: r.NewEntry(rec),
		}
	}
}

// NewEntry converts one ALB access-log record into a LogEntry.
//
// Example record layout (AWS ALB access-log format):
//   rec[0]  type ("http"/"https"/...)
//   rec[1]  timestamp, e.g. 2018-07-02T22:23:00.186641Z
//   rec[3]  client:port, e.g. 192.168.131.39:2817
//   rec[8]  elb_status_code, e.g. 200
//   rec[12] request line, e.g. "GET https://www.example.com:443/ HTTP/1.1"
func (r *Reader) NewEntry(rec []string) *entry.LogEntry {
	item := new(entry.LogEntry)
	// BUG FIX: guard against short/malformed records, which previously caused
	// an index-out-of-range panic (the highest index used below is 12).
	if len(rec) < 13 {
		return item
	}
	entry.ParseRequest(rec[12], item)
	if item.Protocol == "" {
		item.Protocol = rec[0] // fall back to the listener type field
	}
	item.StatusCode, _ = strconv.Atoi(rec[8])
	item.Client = strings.Split(rec[3], ":")[0]         // strip the port from client:port
	item.Created, _ = time.Parse(time.RFC3339, rec[1])  // parse failures leave the zero time
	return item
}

// NewReader builds a Reader for the given S3 location. url is either a plain
// "bucket/path" pair or an access-point ARN (detected by the presence of ':'),
// in which case the access point spans the first two '/'-separated segments
// and the remainder is the object path. from is the earliest LastModified to
// read; tailMode keeps polling with the given interval after each full pass.
func NewReader(url string, cfg aws.Config, from time.Time, tailMode bool, interval time.Duration) (*Reader, error) {
	var bucket, path string
	if strings.Contains(url, ":") {
		// is an ARN
		parts := strings.SplitN(url, "/", 3)
		if len(parts) >= 2 {
			bucket = strings.Join(parts[0:2], "/")
			if len(parts) > 2 {
				path = parts[2]
			}
		}
	} else {
		bucket, path, _ = strings.Cut(url, "/")
	}
	logrus.WithField("Access Point", bucket).WithField("Path", path).Infoln("打开Aws S3", url)
	reader := Reader{
		tailMode:   tailMode,
		interval:   interval,
		bucket:     bucket,
		awsLogsDir: path,
		client:     s3.NewFromConfig(cfg),
		// BUG FIX: from was accepted but never stored, so read() always
		// filtered against the zero time instead of the requested start.
		from: from,
	}
	return &reader, nil
}
