// Copyright © 2025 Hardcore Engineering Inc.
//
// Licensed under the Eclipse Public License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. You may
// obtain a copy of the License at https://www.eclipse.org/legal/epl-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"mime/multipart"
	"net/textproto"
	"net/url"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/hcengineering/stream/internal/pkg/log"
	"github.com/hcengineering/stream/internal/pkg/tracing"
	"github.com/pkg/errors"
	"github.com/valyala/fasthttp"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
	"go.uber.org/zap"
)

// tracer is the package-level OpenTelemetry tracer for datalake operations.
var tracer = otel.Tracer("storage.datalake")

// uploadResult mirrors one entry of the datalake upload response.
// NOTE(review): both fields are unexported, so encoding/json cannot
// populate them during unmarshal — decoding JSON into this type always
// yields zero values, and error detection based on it never triggers.
// Confirm the intended JSON field names and export the fields (with
// `json:"..."` tags) if this type is meant to be decoded.
type uploadResult struct {
	key   string
	error string
}

// DatalakeStorage represents datalake storage. It talks to the datalake
// HTTP API using a shared fasthttp client and authenticates every request
// with a bearer token.
type DatalakeStorage struct {
	baseURL   string // datalake service endpoint; used as the URL prefix for all requests
	workspace string // workspace identifier embedded in every request path
	token     string // bearer token sent in the Authorization header
	logger    *zap.Logger
	client    fasthttp.Client
}

// NewDatalakeStorage creates a new datalake client bound to the given
// base URL, workspace and bearer token. The logger is derived from ctx.
func NewDatalakeStorage(ctx context.Context, baseURL, workspace, token string) Storage {
	logger := log.FromContext(ctx).With(zap.String("storage", "datalake"))

	return &DatalakeStorage{
		baseURL:   baseURL,
		workspace: workspace,
		token:     token,
		logger:    logger,
		client: fasthttp.Client{
			MaxIdleConnDuration: 5 * time.Second,
			MaxConnsPerHost:     100,
		},
	}
}

// quoteEscaper backslash-escapes `\` and `"`, the two characters that must
// not appear raw inside a quoted MIME header parameter value.
var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")

// escapeQuotes returns s with backslashes and double quotes escaped so the
// value can be embedded in a quoted Content-Disposition parameter.
func escapeQuotes(s string) string {
	return quoteEscaper.Replace(s)
}

// createFormFile is a replacement for multipart.Writer.CreateFormFile that
// honors the caller-supplied content type instead of the hardcoded
// application/octet-stream; see https://github.com/golang/go/issues/49329.
func createFormFile(writer *multipart.Writer, fieldname, filename, contentType string) (io.Writer, error) {
	disposition := fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
		escapeQuotes(fieldname), escapeQuotes(filename))

	header := textproto.MIMEHeader{
		"Content-Disposition": {disposition},
		"Content-Type":        {contentType},
	}
	return writer.CreatePart(header)
}

// getObjectKeyFromPath derives the datalake object key from a local file
// path: everything after the final path separator (empty for paths that
// end in a separator, matching filepath.Split semantics).
func getObjectKeyFromPath(s string) string {
	_, key := filepath.Split(s)
	return key
}

// PutFile uploads the file at filename to the datalake as a multipart
// form upload; the object key is the file's base name. Note that the
// entire file is buffered in memory before the request is sent.
func (d *DatalakeStorage) PutFile(ctx context.Context, filename string, options PutOptions) error {
	_, span := tracer.Start(ctx, "datalake.put_file", trace.WithAttributes(
		attribute.String("workspace", d.workspace),
		attribute.String("object_key", getObjectKeyFromPath(filename)),
	))
	defer span.End()

	// #nosec
	file, err := os.Open(filename)
	if err != nil {
		return err
	}
	defer func() {
		_ = file.Close()
	}()

	var objectKey = getObjectKeyFromPath(filename)
	var logger = d.logger.With(zap.String("upload", d.workspace), zap.String("fileName", filename))

	logger.Debug("start")

	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)

	part, err := createFormFile(writer, "file", objectKey, getContentType(objectKey))
	if err != nil {
		tracing.RecordError(span, err)
		return errors.Wrapf(err, "failed to create form file")
	}

	if _, err := io.Copy(part, file); err != nil {
		tracing.RecordError(span, err)
		return errors.Wrapf(err, "failed to copy file data")
	}

	if err := writer.Close(); err != nil {
		tracing.RecordError(span, err)
		return errors.Wrapf(err, "failed to close multipart writer")
	}

	req := fasthttp.AcquireRequest()
	defer fasthttp.ReleaseRequest(req)

	res := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseResponse(res)

	req.SetRequestURI(d.baseURL + "/upload/form-data/" + d.workspace)
	req.Header.SetMethod(fasthttp.MethodPost)
	req.Header.Add("Authorization", "Bearer "+d.token)
	req.Header.SetContentType(writer.FormDataContentType())
	if options.NoCache {
		req.Header.Add("Cache-Control", "max-age=0, must-revalidate")
	}
	req.SetBody(body.Bytes())

	if err := d.client.Do(req, res); err != nil {
		tracing.RecordError(span, err)
		logRequestError(logger, err, "upload failed", res)
		return errors.Wrapf(err, "upload failed")
	}

	// Fail fast on non-2xx responses, consistent with the other methods;
	// previously an error page body was fed straight into the JSON parser.
	if err := okResponse(res); err != nil {
		tracing.RecordError(span, err)
		logRequestError(logger, err, "bad status code", res)
		return err
	}

	// Decode into a local struct with exported, tagged fields so
	// encoding/json can actually populate them (the unexported fields of
	// uploadResult are silently skipped by the unmarshaler).
	// Assumes the datalake reports per-file results as "key"/"error" —
	// TODO(review): confirm field names against the datalake API.
	var results []struct {
		Key   string `json:"key"`
		Error string `json:"error"`
	}
	if err := json.Unmarshal(res.Body(), &results); err != nil {
		tracing.RecordError(span, err)
		return errors.Wrapf(err, "parse error")
	}

	for _, r := range results {
		if r.Error != "" {
			uploadErr := fmt.Errorf("upload error: %v %v", r.Key, r.Error)
			// Record the actual upload failure; the previous code recorded
			// the unmarshal error here, which was guaranteed to be nil.
			tracing.RecordError(span, uploadErr)
			return uploadErr
		}
	}

	return nil
}

// DeleteFile removes the object derived from filename from the datalake.
func (d *DatalakeStorage) DeleteFile(ctx context.Context, filename string) error {
	_, span := tracer.Start(ctx, "datalake.delete_file", trace.WithAttributes(
		attribute.String("workspace", d.workspace),
		attribute.String("object_key", getObjectKeyFromPath(filename)),
	))
	defer span.End()

	logger := d.logger.With(zap.String("delete", d.workspace), zap.String("fileName", filename))
	logger.Debug("start")

	objectKey := getObjectKeyFromPath(filename)

	req := fasthttp.AcquireRequest()
	res := fasthttp.AcquireResponse()
	defer func() {
		fasthttp.ReleaseRequest(req)
		fasthttp.ReleaseResponse(res)
	}()

	req.SetRequestURI(fmt.Sprintf("%s/blob/%s/%s", d.baseURL, d.workspace, objectKey))
	req.Header.SetMethod(fasthttp.MethodDelete)
	req.Header.Add("Authorization", "Bearer "+d.token)

	if err := d.client.Do(req, res); err != nil {
		tracing.RecordError(span, err)
		logRequestError(logger, err, "delete failed", res)
		return errors.Wrapf(err, "delete failed")
	}

	if err := okResponse(res); err != nil {
		tracing.RecordError(span, err)
		logRequestError(logger, err, "bad status code", res)
		return err
	}

	return nil
}

// PatchMeta sends the given metadata as a JSON PATCH for the object
// derived from filename.
func (d *DatalakeStorage) PatchMeta(ctx context.Context, filename string, md *Metadata) error {
	_, span := tracer.Start(ctx, "datalake.patch_meta", trace.WithAttributes(
		attribute.String("workspace", d.workspace),
		attribute.String("object_key", getObjectKeyFromPath(filename)),
	))
	defer span.End()

	logger := d.logger.With(zap.String("patch meta", d.workspace), zap.String("fileName", filename))
	logger.Debug("start")
	defer logger.Debug("finished")

	payload, err := json.Marshal(md)
	if err != nil {
		return err
	}

	objectKey := getObjectKeyFromPath(filename)

	req := fasthttp.AcquireRequest()
	defer fasthttp.ReleaseRequest(req)
	req.SetRequestURI(d.baseURL + "/meta/" + d.workspace + "/" + objectKey)
	req.Header.SetMethod(fasthttp.MethodPatch)
	req.Header.Add("Authorization", "Bearer "+d.token)
	req.Header.SetContentType("application/json")
	req.SetBody(payload)

	resp := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseResponse(resp)

	if err := d.client.Do(req, resp); err != nil {
		tracing.RecordError(span, err)
		logRequestError(logger, err, "request failed", resp)
		return err
	}

	if err := okResponse(resp); err != nil {
		tracing.RecordError(span, err)
		logRequestError(logger, err, "bad status code", resp)
		return err
	}

	return nil
}

// GetMeta gets metadata related to the object derived from filename.
func (d *DatalakeStorage) GetMeta(ctx context.Context, filename string) (*Metadata, error) {
	// Span name fixed: it was "datalake.put_file", a copy-paste from PutFile,
	// which made traces of metadata reads indistinguishable from uploads.
	_, span := tracer.Start(ctx, "datalake.get_meta", trace.WithAttributes(
		attribute.String("workspace", d.workspace),
		attribute.String("object_key", getObjectKeyFromPath(filename)),
	))
	defer span.End()

	var logger = d.logger.With(zap.String("get meta", d.workspace), zap.String("fileName", filename))
	logger.Debug("start")

	var objectKey = getObjectKeyFromPath(filename)

	req := fasthttp.AcquireRequest()
	defer fasthttp.ReleaseRequest(req)
	req.SetRequestURI(d.baseURL + "/meta/" + d.workspace + "/" + objectKey)
	req.Header.SetMethod(fasthttp.MethodGet)
	req.Header.Add("Authorization", "Bearer "+d.token)

	resp := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseResponse(resp)

	if err := d.client.Do(req, resp); err != nil {
		tracing.RecordError(span, err)
		logRequestError(logger, err, "request failed", resp)
		return nil, err
	}

	if err := okResponse(resp); err != nil {
		tracing.RecordError(span, err)
		logRequestError(logger, err, "bad status code", resp)
		return nil, err
	}

	var md Metadata
	if err := json.Unmarshal(resp.Body(), &md); err != nil {
		tracing.RecordError(span, err)
		// Return nil on decode failure instead of a partially populated value.
		return nil, err
	}

	return &md, nil
}

// GetFile downloads the object derived from filename from the datalake and
// writes it to the local path destination.
func (d *DatalakeStorage) GetFile(ctx context.Context, filename, destination string) error {
	_, span := tracer.Start(ctx, "datalake.get_file", trace.WithAttributes(
		attribute.String("workspace", d.workspace),
		attribute.String("object_key", getObjectKeyFromPath(filename)),
	))
	defer span.End()

	logger := d.logger.With(
		zap.String("get", d.workspace),
		zap.String("fileName", filename),
		zap.String("destination", destination),
	)
	logger.Debug("start")

	objectKey := getObjectKeyFromPath(filename)

	req := fasthttp.AcquireRequest()
	defer fasthttp.ReleaseRequest(req)
	req.SetRequestURI(d.baseURL + "/blob/" + d.workspace + "/" + objectKey)
	req.Header.SetMethod(fasthttp.MethodGet)
	req.Header.Add("Authorization", "Bearer "+d.token)

	resp := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseResponse(resp)

	if err := d.client.Do(req, resp); err != nil {
		tracing.RecordError(span, err)
		logRequestError(logger, err, "request failed", resp)
		return err
	}

	if err := okResponse(resp); err != nil {
		tracing.RecordError(span, err)
		logRequestError(logger, err, "bad status code", resp)
		return err
	}

	file, err := os.Create(destination) // #nosec
	if err != nil {
		tracing.RecordError(span, err)
		logger.Debug("can't create a file", zap.Error(err))
		return err
	}
	defer func() { _ = file.Close() }()

	if err := resp.BodyWriteTo(file); err != nil {
		tracing.RecordError(span, err)
		logger.Debug("can't write to file", zap.Error(err))
		return err
	}

	stat, err := os.Stat(destination)
	if err != nil {
		tracing.RecordError(span, err)
		logger.Error("can't stat the file", zap.Error(err))
		return err
	}

	logger.Info("file downloaded successfully", zap.Int64("size", stat.Size()))
	return nil
}

// StatFile issues a HEAD request for the object derived from filename and
// returns size, content type and ETag as reported by the response headers.
func (d *DatalakeStorage) StatFile(ctx context.Context, filename string) (*BlobInfo, error) {
	_, span := tracer.Start(ctx, "datalake.stat_file", trace.WithAttributes(
		attribute.String("workspace", d.workspace),
		attribute.String("object_key", getObjectKeyFromPath(filename)),
	))
	defer span.End()

	logger := d.logger.With(zap.String("head", d.workspace), zap.String("fileName", filename))
	logger.Debug("start")

	objectKey := getObjectKeyFromPath(filename)

	req := fasthttp.AcquireRequest()
	resp := fasthttp.AcquireResponse()
	defer func() {
		fasthttp.ReleaseRequest(req)
		fasthttp.ReleaseResponse(resp)
	}()

	req.SetRequestURI(d.baseURL + "/blob/" + d.workspace + "/" + objectKey)
	req.Header.SetMethod(fasthttp.MethodHead)
	req.Header.Add("Authorization", "Bearer "+d.token)

	if err := d.client.Do(req, resp); err != nil {
		tracing.RecordError(span, err)
		logRequestError(logger, err, "request failed", resp)
		return nil, err
	}

	if err := okResponse(resp); err != nil {
		tracing.RecordError(span, err)
		logRequestError(logger, err, "bad status code", resp)
		return nil, err
	}

	info := &BlobInfo{
		Size: int64(resp.Header.ContentLength()),
		Type: string(resp.Header.ContentType()),
		ETag: string(resp.Header.Peek("ETag")),
	}

	logger.Debug("finished")
	return info, nil
}

// SetParent updates the blob's parent reference. Setting an object as its
// own parent is a no-op.
func (d *DatalakeStorage) SetParent(ctx context.Context, filename, parent string) error {
	_, span := tracer.Start(ctx, "datalake.set_parent", trace.WithAttributes(
		attribute.String("workspace", d.workspace),
		attribute.String("object_key", getObjectKeyFromPath(filename)),
	))
	defer span.End()

	logger := d.logger.With(
		zap.String("workspace", d.workspace),
		zap.String("fileName", filename),
		zap.String("parent", parent),
	)
	logger.Debug("start")

	objectKey := getObjectKeyFromPath(filename)
	parentKey := getObjectKeyFromPath(parent)

	// A blob cannot usefully be its own parent; skip the round trip.
	if objectKey == parentKey {
		return nil
	}

	req := fasthttp.AcquireRequest()
	defer fasthttp.ReleaseRequest(req)
	req.SetRequestURI(d.baseURL + "/blob/" + d.workspace + "/" + objectKey + "/parent")
	req.Header.SetMethod(fasthttp.MethodPatch)
	req.Header.Add("Authorization", "Bearer "+d.token)
	req.Header.SetContentType("application/json")

	payload := map[string]any{"parent": parentKey}
	if err := json.NewEncoder(req.BodyWriter()).Encode(payload); err != nil {
		tracing.RecordError(span, err)
		logger.Debug("can not encode body", zap.Error(err))
		return err
	}

	resp := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseResponse(resp)

	if err := d.client.Do(req, resp); err != nil {
		tracing.RecordError(span, err)
		logRequestError(logger, err, "request failed", resp)
		return err
	}

	if err := okResponse(resp); err != nil {
		tracing.RecordError(span, err)
		logRequestError(logger, err, "bad status code", resp)
		return err
	}

	return nil
}

// MultipartUploadStart asks the datalake to begin a multipart upload for
// objectName and returns the server-assigned upload id.
func (d *DatalakeStorage) MultipartUploadStart(ctx context.Context, objectName, contentType string) (string, error) {
	_, span := tracer.Start(ctx, "datalake.multipart_upload_start", trace.WithAttributes(
		attribute.String("workspace", d.workspace),
		attribute.String("object_key", objectName),
	))
	defer span.End()

	logger := d.logger.With(zap.String("workspace", d.workspace), zap.String("objectName", objectName))
	uri := fmt.Sprintf("%v/upload/multipart/%v/%v", d.baseURL, d.workspace, objectName)

	req := fasthttp.AcquireRequest()
	resp := fasthttp.AcquireResponse()
	defer func() {
		fasthttp.ReleaseRequest(req)
		fasthttp.ReleaseResponse(resp)
	}()

	req.SetRequestURI(uri)
	req.Header.SetMethod(fasthttp.MethodPost)
	req.Header.Add("Authorization", "Bearer "+d.token)
	req.Header.SetContentType(contentType)

	if err := d.client.Do(req, resp); err != nil {
		tracing.RecordError(span, err)
		logRequestError(logger, err, "request failed", resp)
		return "", err
	}

	if err := okResponse(resp); err != nil {
		tracing.RecordError(span, err)
		logRequestError(logger, err, "bad status code", resp)
		return "", err
	}

	var result struct {
		UploadID string `json:"uploadId"`
	}
	err := json.Unmarshal(resp.Body(), &result)

	return result.UploadID, err
}

// MultipartUploadPart uploads one part of an in-progress multipart upload
// and returns the part descriptor reported by the datalake.
func (d *DatalakeStorage) MultipartUploadPart(ctx context.Context, objectName, uploadID string, partNumber int, data []byte) (*MultipartPart, error) {
	_, span := tracer.Start(ctx, "datalake.multipart_upload_part", trace.WithAttributes(
		attribute.String("workspace", d.workspace),
		attribute.String("object_key", objectName),
		attribute.Int("size", len(data)),
	))
	defer span.End()

	logger := d.logger.With(
		zap.String("workspace", d.workspace),
		zap.String("uploadID", uploadID),
		zap.Int("partNumber", partNumber),
	)

	// Local renamed from `url` to avoid shadowing the net/url package.
	query := url.Values{}
	query.Set("uploadId", uploadID)
	query.Set("partNumber", strconv.Itoa(partNumber))
	uri := fmt.Sprintf("%v/upload/multipart/%v/%v/part?%v", d.baseURL, d.workspace, objectName, query.Encode())

	req := fasthttp.AcquireRequest()
	resp := fasthttp.AcquireResponse()
	defer func() {
		fasthttp.ReleaseRequest(req)
		fasthttp.ReleaseResponse(resp)
	}()

	req.SetRequestURI(uri)
	req.Header.SetMethod(fasthttp.MethodPut)
	req.Header.Add("Authorization", "Bearer "+d.token)
	req.Header.SetContentType("application/octet-stream")
	req.Header.SetContentLength(len(data))
	req.SetBody(data)

	if err := d.client.Do(req, resp); err != nil {
		tracing.RecordError(span, err)
		logRequestError(logger, err, "request failed", resp)
		return nil, err
	}

	if err := okResponse(resp); err != nil {
		tracing.RecordError(span, err)
		logRequestError(logger, err, "bad status code", resp)
		return nil, err
	}

	var part MultipartPart
	err := json.Unmarshal(resp.Body(), &part)

	return &part, err
}

// MultipartUploadComplete finalizes a multipart upload by sending the
// collected part descriptors to the datalake.
func (d *DatalakeStorage) MultipartUploadComplete(ctx context.Context, objectName, uploadID string, parts []MultipartPart) error {
	_, span := tracer.Start(ctx, "datalake.multipart_upload_complete", trace.WithAttributes(
		attribute.String("workspace", d.workspace),
		attribute.String("object_key", objectName),
	))
	defer span.End()

	logger := d.logger.With(
		zap.String("workspace", d.workspace),
		zap.String("uploadID", uploadID),
		zap.String("objectName", objectName),
	)

	payload, err := json.Marshal(map[string]any{"parts": parts})
	if err != nil {
		logger.Debug("can not encode body", zap.Error(err))
		return err
	}

	query := url.Values{}
	query.Set("uploadId", uploadID)
	uri := fmt.Sprintf("%v/upload/multipart/%v/%v/complete?%v", d.baseURL, d.workspace, objectName, query.Encode())

	req := fasthttp.AcquireRequest()
	resp := fasthttp.AcquireResponse()
	defer func() {
		fasthttp.ReleaseRequest(req)
		fasthttp.ReleaseResponse(resp)
	}()

	req.SetRequestURI(uri)
	req.Header.SetMethod(fasthttp.MethodPost)
	req.Header.Add("Authorization", "Bearer "+d.token)
	req.Header.SetContentType("application/json")
	req.SetBody(payload)

	if err := d.client.Do(req, resp); err != nil {
		tracing.RecordError(span, err)
		logRequestError(logger, err, "request failed", resp)
		return err
	}

	if err := okResponse(resp); err != nil {
		tracing.RecordError(span, err)
		logRequestError(logger, err, "bad status code", resp)
		return err
	}

	return nil
}

// MultipartUploadCancel aborts an in-progress multipart upload.
func (d *DatalakeStorage) MultipartUploadCancel(ctx context.Context, objectName, uploadID string) error {
	_, span := tracer.Start(ctx, "datalake.multipart_upload_cancel", trace.WithAttributes(
		attribute.String("workspace", d.workspace),
		attribute.String("object_key", objectName),
	))
	defer span.End()

	logger := d.logger.With(zap.String("workspace", d.workspace), zap.String("uploadID", uploadID))

	query := url.Values{}
	query.Set("uploadId", uploadID)
	uri := fmt.Sprintf("%v/upload/multipart/%v/%v/abort?%v", d.baseURL, d.workspace, objectName, query.Encode())

	req := fasthttp.AcquireRequest()
	resp := fasthttp.AcquireResponse()
	defer func() {
		fasthttp.ReleaseRequest(req)
		fasthttp.ReleaseResponse(resp)
	}()

	req.SetRequestURI(uri)
	req.Header.SetMethod(fasthttp.MethodPost)
	req.Header.Add("Authorization", "Bearer "+d.token)

	if err := d.client.Do(req, resp); err != nil {
		tracing.RecordError(span, err)
		logRequestError(logger, err, "request failed", resp)
		return err
	}

	if err := okResponse(resp); err != nil {
		tracing.RecordError(span, err)
		logRequestError(logger, err, "bad status code", resp)
		return err
	}

	return nil
}

// okResponse returns nil when the response status code is in the 2xx
// range and a descriptive error otherwise.
func okResponse(res *fasthttp.Response) error {
	code := res.StatusCode()
	if code < 200 || code >= 300 {
		return fmt.Errorf("unexpected status code: %d", code)
	}
	return nil
}

// logRequestError logs a failed datalake request at error level together
// with the response status, headers and body for debugging.
func logRequestError(logger *zap.Logger, err error, msg string, res *fasthttp.Response) {
	fields := []zap.Field{
		zap.Error(err),
		zap.Int("status", res.StatusCode()),
		zap.String("headers", res.Header.String()),
		zap.String("response", res.String()),
	}
	logger.Error(msg, fields...)
}

// Compile-time assertions that DatalakeStorage implements the storage interfaces.
var _ Storage = (*DatalakeStorage)(nil)
var _ MultipartStorage = (*DatalakeStorage)(nil)
var _ MetaProvider = (*DatalakeStorage)(nil)
