package iharbor2

import (
	"bytes"
	"context"
	"crypto/hmac"
	"crypto/md5"
	"crypto/sha1"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"github.com/rclone/rclone/backend/iharbor2/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/bucket"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/pool"
	"github.com/rclone/rclone/lib/rest"
	gohash "hash"
	"io"
	"net/http"
	"path"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"
)

// Constants controlling timing, chunking and API behaviour for the
// iharbor backend.
const (
	defaultEndpoint  = "https://obs.cstcloud.cn"
	minSleep         = 10 * time.Millisecond
	maxSleep         = 5 * time.Minute
	maxParts         = 10000
	maxVersions      = 100 // maximum number of versions we search in --b2-versions mode
	minChunkSize     = 5 * fs.Mebi
	defaultChunkSize = 10 * fs.Mebi
	//defaultUploadCutoff = 1 * fs.Gibi
	//defaultUploadCutoff = 100 * fs.Mebi
	defaultUploadCutoff = 500 * fs.Mebi
	largeFileCopyCutoff = 4 * fs.Gibi              // 5E9 is the max
	memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
	memoryPoolUseMmap   = false
	decayConstant       = 1    // bigger for slower decay, exponential
	version             = "v2" // API version used for file uploads (v1 or v2, default v2)
	defaultVersion      = "v1" // API version used for every operation other than upload
	SharedBucketToken   = ""
	numberOfConcurrent  = 5 // limit on the number of concurrent uploads

)

// Options defines the configuration for this backend (the config keys
// shown when the remote is registered with rclone).
type Options struct {
	Username     string        `config:"username"`
	Password     string        `config:"password"`
	Endpoint     string        `config:"endpoint"`
	Version      string        `config:"apiVersion"`
	UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` // files larger than this are uploaded in chunks
	//CopyCutoff      fs.SizeSuffix `config:"copy_cutoff"`
	ChunkSize           fs.SizeSuffix        `config:"chunk_size"`
	DisableCheckSum     bool                 `config:"disable_checksum"`
	MemoryPoolFlushTime fs.Duration          `config:"memory_pool_flush_time"`
	MemoryPoolUseMmap   bool                 `config:"memory_pool_use_mmap"`
	ShareToken          string               `config:"SharedBucketToken"`
	AccessKeyID         string               `config:"accessKeyID"`
	SecretAccessKey     string               `config:"secretAccessKey"`
	Token               string               `config:"token"`
	BlockConcurrency    bool                 `config:"block_concurrency"` // upload chunks concurrently when set
	NumberOfConcurrent  int64                `config:"number_of_concurrent"`
	Enc                 encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote Iharbor server
type Fs struct {
	name            string         // name of this remote
	root            string         // the path we are working on if any
	opt             Options        // parsed config options
	ci              *fs.ConfigInfo // global config
	features        *fs.Features   // optional features
	srv             *rest.Client   // the connection to the Iharbor server
	client          *http.Client   // underlying HTTP client
	rootBucket      string                // bucket part of root (if any)
	rootDirectory   string                // directory part of root (if any)
	cache           *bucket.Cache         // cache for bucket creation status
	bucketIDMutex   sync.Mutex            // mutex to protect _bucketID
	_bucketID       map[string]int        // the ID of the bucket we are working on
	bucketTypeMutex sync.Mutex            // mutex to protect _bucketType
	_bucketType     map[string]string     // the Type of the bucket we are working on
	info            api.Token             // result of authorize call
	authMu          sync.Mutex            // lock for authorizing the account
	pacer           *fs.Pacer             // To pace and retry the API calls
	pool            *pool.Pool            // memory pool
	uploadToken     *pacer.TokenDispenser // control concurrency
	user            string                // login user name (set outside this chunk - confirm)
	pass            string                // login password (set outside this chunk - confirm)
	mutex           sync.Mutex            // general purpose lock
}

// Object describes an iharbor object
type Object struct {
	fs          *Fs       // what this object is part of
	remote      string    // The remote path
	Name        string    // iharbor name of the file
	id          int       // iharbor numeric ID of the file
	modTime     time.Time // The modified time of the object if known
	MD5         string    // MD5 hash if known
	size        int64     // Size of the object
	mimeType    string    // Content-Type of the object
	DownloadUrl api.File  // file metadata (presumably carries the download URL - confirm)
}

// ID returns the iharbor numeric ID of the object as a decimal string.
func (o *Object) ID() string {
	return strconv.FormatInt(int64(o.id), 10)
}

// MimeType returns the Content-Type of the object if known.
func (o *Object) MimeType(ctx context.Context) string {
	return o.mimeType
}

// ModTime returns the modification time of the object
//
// It attempts to refresh the object's metadata first; on failure the
// previously cached (possibly zero) time is returned.
func (o *Object) ModTime(ctx context.Context) (result time.Time) {
	// The error is logged in readMetaData
	_ = o.readMetaData(ctx)
	return o.modTime
}

// Hash returns the MD5 of the object as a lowercase hex string.
// Any other hash type is unsupported.
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	if o.MD5 != "" {
		return o.MD5, nil
	}
	// Hash not cached - fetch the metadata (errors are logged there too)
	if err := o.readMetaData(ctx); err != nil {
		return "", err
	}
	return o.MD5, nil
}

// timeString returns modTime as the number of milliseconds
// elapsed since January 1, 1970 UTC as a decimal string.
func timeString(modTime time.Time) string {
	return strconv.FormatInt(modTime.UnixNano()/1e6, 10)
}

// SetModTime is a no-op - this backend does not support setting the
// modification time.
// NOTE(review): rclone backends conventionally return
// fs.ErrorCantSetModTime here rather than nil; confirm before changing
// as callers may rely on the silent success.
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
	return nil
}

// openFile represents an Object open for reading
type openFile struct {
	o     *Object        // Object we are reading for
	resp  *http.Response // response of the GET
	body  io.Reader      // reading from here
	hash  gohash.Hash    // currently accumulating MD5 (see newOpenFile; the old comment said SHA1)
	bytes int64          // number of bytes read on this connection
	eof   bool           // whether we have read end of file
}

// newOpenFile wraps the GET response so that everything read through it
// is also fed into an MD5 accumulator, checked on Close.
func newOpenFile(o *Object, resp *http.Response) *openFile {
	hasher := md5.New()
	return &openFile{
		o:    o,
		resp: resp,
		hash: hasher,
		body: io.TeeReader(resp.Body, hasher),
	}
}

// Read bytes from the object - see io.Reader
func (file *openFile) Read(p []byte) (int, error) {
	n, err := file.body.Read(p)
	file.bytes += int64(n)
	if err == io.EOF {
		// Remember we reached the end so Close can verify the hash
		file.eof = true
	}
	return n, err
}

// Close the object and check the length and MD5 sum if all of the
// object was read.
func (file *openFile) Close() (err error) {
	// Close the body at the end
	defer fs.CheckClose(file.resp.Body, &err)

	// If we didn't read to EOF the length/hash checks are meaningless
	if !file.eof {
		return nil
	}

	// Check to see we read the correct number of bytes
	if file.o.Size() != file.bytes {
		return fmt.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
	}

	// Check the MD5 sum. (The previous code named these SHA1 but the
	// hash accumulated in newOpenFile is MD5, and it carried a dead
	// `receivedSHA1 != ""` guard that was always true after the
	// fallback assignment.)
	calculatedMD5 := fmt.Sprintf("%x", file.hash.Sum(nil))
	receivedMD5 := file.o.MD5
	if receivedMD5 == "" {
		// No hash from the server - record the one we calculated
		file.o.MD5 = calculatedMD5
		return nil
	}
	if receivedMD5 != calculatedMD5 {
		return fmt.Errorf("object corrupted on transfer - MD5 mismatch (want %q got %q)", receivedMD5, calculatedMD5)
	}

	return nil
}

// Compile-time check that openFile satisfies io.ReadCloser.
var _ io.ReadCloser = &openFile{}

// readRemoutFileInfo fetches the metadata for bucket/bucketpath from
// the iharbor metadata API.
//
// It returns (resp, nil, nil) when the server reports that the path or
// object does not exist, so callers can distinguish "not found" from a
// real failure.
func (o *Object) readRemoutFileInfo(ctx context.Context, bucket, bucketpath string) (resp *http.Response, info *api.File, err error) {
	Path := "/api/" + defaultVersion + "/metadata/" + bucket + "/" + bucketpath + "/"
	opts := o.fs.HandlerAccesskeyHeader("GET", Path, 3600, nil, nil, nil, nil)

	var response *api.Metadata
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &response)
		return o.fs.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		// The server reports missing paths/objects inside the error
		// message. FIX: the original computed two regexp matches but
		// the second assignment overwrote the first, so the
		// "目录路径不存在" (directory path does not exist) message was
		// never honoured. Both literal messages now count as not-found.
		msg := err.Error()
		if strings.Contains(msg, "目录路径不存在") || strings.Contains(msg, "对象或目录不存在") {
			return resp, nil, nil
		}
		return resp, nil, err
	}
	if resp == nil || response == nil {
		return nil, nil, errors.New("Data cannot be obtained. Check whether the service is running properly.")
	}

	return resp, &response.Obj, nil
}

// Open an object for read.
//
// Honours fs.RangeOption (after FixRangeOption normalisation); a ranged
// read is expressed to the server via offset/size query parameters,
// e.g. .../obj/bucket/file/?offset=0&size=10
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	bucket, bucketPath := o.split()
	var offset, count int64 = 0, -1
	fs.FixRangeOption(options, o.size)
	for _, option := range options {
		switch x := option.(type) {

		case *fs.RangeOption:
			offset, count = x.Decode(o.size)
			if count < 0 {
				// Open-ended range - read to the end of the object
				count = o.size - offset
			}
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	var Path string
	if count == -1 {
		// Whole object
		Path = "/api/" + defaultVersion + "/obj/" + bucket + "/" + bucketPath + "/"

	} else {
		// Ranged read
		Path = "/api/" + defaultVersion + "/obj/" + bucket + "/" + bucketPath + "/?offset=" + strconv.FormatInt(offset, 10) + "&size=" + strconv.FormatInt(count, 10)
	}
	var resp *http.Response

	opts := o.fs.HandlerAccesskeyHeader("GET", Path, 3600, nil, nil, nil, nil)

	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		return o.fs.shouldRetry(ctx, resp, err)
	})

	if err != nil {
		return nil, err
	}
	// The caller is responsible for closing the returned body
	return resp.Body, err
}

// Backup: previous implementation of Open, kept for reference.
//Open an object for read
//func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
//	fs.FixRangeOption(options, o.size)
//	var resp *http.Response
//	resp, info, err := o.getOrHead(ctx, "GET", options)
//	err = o.decodeMetaData(info)
//	if err != nil {
//		_ = resp.Body.Close()
//		return nil, err
//	}
//	o.Name = info.Name
//	o.size = info.Size
//	o.modTime = o.fs.stringToTime(info.ReviseFileTime)
//	return newOpenFile(o, resp), nil
//
//}

// Update uploads the contents of in to the remote object.
//
// Files larger than upload_cutoff (or of unknown size, size == -1) are
// uploaded in chunks - concurrently when block_concurrency is set.
// Smaller files go through the v1 or v2 single-request upload depending
// on the configured API version.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	fs.Debugf(o, "upload files : %s ", src.String())
	size := src.Size() // may be -1 when the source size is unknown
	var hashmd5 string

	fs.Debugf(o, "需计算，请稍等。")

	if size == -1 || size > int64(o.fs.opt.UploadCutoff) {
		// Large (or unknown sized) file - upload in chunks
		if o.fs.opt.BlockConcurrency {
			// Concurrent chunk upload
			err, hashmd5 = o.fs.fileMultipartUpload2(ctx, o, in, src, o.fs.opt.ChunkSize)
			if err != nil {
				fs.Errorf(o, "Upload Error : File multipart upload failure, waiting for reupload")
				return err
			}
			fs.Debugf(o, "Upload Success : File multipart upload complete.")
		} else {
			// Sequential chunk upload
			err, hashmd5 = o.fs.fileMultipartUpload(ctx, o, in, src, o.fs.opt.ChunkSize)
			if err != nil {
				fs.Errorf(o, "Upload Error : File multipart upload failure, waiting for reupload.")
				return err
			}
			fs.Debugf(o, "Upload Success : File multipart upload complete.")
		}

		// TODO: record the combined MD5 of the uploaded chunks instead
		return o.decodeMetaDataFileInfo(src.String(), size, hashmd5, "multipart/form-data", time.Now())
	}

	// Small file - single request upload. The hash may legitimately be
	// unavailable from the source, so the error is deliberately ignored
	// and the upload helpers tolerate an empty md5String.
	md5String, _ := src.Hash(ctx, hash.MD5)
	if o.fs.opt.Version == "v1" {
		if size == 0 {
			err := o.fs.V1EmptyFileUpdate(ctx, o, in, size)
			if err != nil {
				return err
			}
		} else {
			bodyBufer, contentType, err := o.fs.V1PutUploadBuffer(ctx, o, in, src.String())
			if err != nil {
				fs.Debugf(o, "缓冲区写入错误。")
				return err
			}
			err = o.fs.V1FileUpload(ctx, o, bodyBufer, contentType, md5String)
			if err != nil {
				return err
			}
		}
	} else {
		err := o.fs.V2FileUpload(ctx, o, in, md5String, size)
		if err != nil {
			return err
		}
	}
	// FIX: removed the dead `if contentType == "" { contentType = "" }`.
	// The contentType declared in the v1 branch shadowed the outer
	// variable and never escaped it, so the recorded content type was
	// always empty here - that behaviour is preserved explicitly.
	return o.decodeMetaDataFileInfo(src.String(), size, md5String, "", time.Now())
}

// decodeMetaDataFileInfo sets the metadata in the object from the
// values recorded after an upload.
//
// Sets
//  o.Name
//  o.modTime
//  o.size
//  o.MD5
func (o *Object) decodeMetaDataFileInfo(name string, size int64, md5 string, contentType string, reviserfiletime time.Time) (err error) {
	return o.decodeMetaDataRaw(name, size, md5, contentType, reviserfiletime)
}

// Remove deletes the remote object (by name).
//
// After deleting the file it also tries to remove the parent directory.
// NOTE(review): RemoveEmptyDirectory's error is propagated, so if the
// parent directory is not empty this may surface an error even though
// the file itself was deleted - confirm this is the intended behaviour.
func (o *Object) Remove(ctx context.Context) error {
	bucket, bucketPath := o.split()
	err := o.fs.deleteFile(ctx, bucket, bucketPath, bucketPath)
	if err != nil {
		return err
	}
	k := strings.TrimRight(bucketPath, "/")
	num := strings.LastIndex(k, "/")
	if num != -1 {
		// If the file's directory is now empty, remove it too
		err = o.fs.RemoveEmptyDirectory(ctx, bucket, bucketPath[:num], "")
		if err != nil {
			return err
		}
	}

	return nil
}

// String returns a description of the Object for log messages.
func (o *Object) String() string {
	if o != nil {
		return o.remote
	}
	return "<nil>"
}

// Remote returns the remote path of the object.
func (o *Object) Remote() string {
	return o.remote
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.size
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Storable returns if this object is storable
func (o *Object) Storable() bool {
	return true
}

// ----------------------------------------------------------------------------------------------------

// Name returns the configured name of this remote.
func (f *Fs) Name() string {
	return f.name
}

// Root returns the root path of this remote.
func (f *Fs) Root() string {
	return f.root
}

// String returns a human readable description of this remote.
func (f *Fs) String() string {
	// FIX: fmt.Sprintf with no format arguments (flagged by go vet) is
	// replaced with a plain string literal.
	if f.rootBucket == "" {
		return "Iharbor root"
	}
	if f.rootDirectory == "" {
		return fmt.Sprintf("Iharbor bucket %s", f.rootBucket)
	}
	return fmt.Sprintf("Iharbor bucket %s path %s", f.rootBucket, f.rootDirectory)
}

// Precision returns the precision of the remote's modification times.
func (f *Fs) Precision() time.Duration {
	return time.Millisecond
}

// Hashes returns the supported hash types - files are verified with MD5.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// NewObject finds the Object at remote.  If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}

// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Temporary Object under construction.
	// FIX: the local was previously named "fs", shadowing the imported
	// fs package inside this function.
	o := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return o, o.Update(ctx, in, src, options...)
}

// Mkdir creates the bucket and/or directory given by dir.
//
// Historically this backend only created buckets here; it now also
// creates the directory inside the bucket when one is given.
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	bucket, bucketPath := f.split(dir)
	b, err := f.doesTheBucketExist(ctx, bucket)
	if err != nil {
		// Propagate any error from the existence check
		return err
	}
	if b {
		// Bucket already exists - create the directory if one was given
		if bucketPath != "" {
			c, err := f.doesTheDirectoryExist(ctx, bucket, bucketPath)
			if err != nil {
				return err
			}
			if c {
				// Directory already exists - nothing to do
				fmt.Println("The directory exists and cannot be created.")
				return nil
			}
			directory, err := f.createDirectory(ctx, bucket, bucketPath)
			if directory && err == nil {
				fmt.Println("Directory created successfully.")
				return nil
			}
			if err != nil {
				return err
			}
		}
		return errors.New("the bucket exists and cannot be created repeatedly")
	}
	// Bucket does not exist - create it, then the directory if required
	err = f.makeBucket(ctx, bucket, bucketPath)
	if err != nil {
		return err
	}
	if bucketPath != "" {
		directory, err := f.createDirectory(ctx, bucket, bucketPath)
		if directory && err == nil {
			fmt.Println("Directory created successfully")
			return nil
		}
		if err != nil {
			return err
		}
	}

	return nil
}

// clearBucketID removes the cached ID for the given bucket name.
func (f *Fs) clearBucketID(bucket string) {
	f.bucketIDMutex.Lock()
	defer f.bucketIDMutex.Unlock()
	delete(f._bucketID, bucket)
}

// clearBucketType removes the cached Type for the given bucket name.
func (f *Fs) clearBucketType(bucket string) {
	f.bucketTypeMutex.Lock()
	defer f.bucketTypeMutex.Unlock()
	delete(f._bucketType, bucket)
}

// Rmdir removes the directory given by dir.
//
// Removing a bucket itself is not supported from rclone - the user is
// directed to the cloud console instead. (The previous bucket-deletion
// implementation was removed; it had been left behind as a large block
// of commented-out code.)
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	_, directory := f.split(dir)
	if directory == "" {
		return errors.New("please go to the https://service.cstcloud.cn to remove the bucket")
	}
	return f.purge(ctx, dir, false)
}

// purge deletes all the files and directories
//
// if oldOnly is true then it deletes only non current files.
//
// It deletes the files under the directory, then removes the (now
// empty) directories, repeating until the directory deletion succeeds.
// NOTE(review): if traverseDirectoryTest fails persistently the goto
// retries forever and its error is never surfaced - consider bounding
// the retries.
func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
	// directory is the name of the directory being deleted
	bucket, directory := f.split(dir)
LOOP:
	// Delete the files first
	err := f.TargetDeleteFile(ctx, bucket, directory, directory)
	if err != nil {
		return err
	}

	// Then delete the empty directories
	err = f.traverseDirectoryTest(ctx, bucket, directory, directory)
	if err != nil {
		goto LOOP
	}
	return nil
}

// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context, dir string) error {
	return f.purge(ctx, dir, false)
}

// CleanUp deletes all the hidden files.
func (f *Fs) CleanUp(ctx context.Context) error {
	// FIX: removed a leftover debug fmt.Printf("CleanUp \n")
	return f.purge(ctx, "", true)
}

// PublicLink returns a link for downloading without account.
//
// NOTE(review): this is a stub - it prints debug information and always
// returns an empty link with a nil error, which callers will treat as a
// valid (empty) link. Either implement it or return an error.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
	bucket, bucketPath := f.split(remote)
	fmt.Printf("PublicLink \n")

	fmt.Printf("bucket = %s , bucketPath = %s \n", bucket, bucketPath)
	return link, nil
}

// -------------------------------------------------------------

// checkUploadCutoff makes sure the upload cutoff is not smaller than
// the configured chunk size.
func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
	if cs >= opt.ChunkSize {
		return nil
	}
	return fmt.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
}

// checkUploadChunkSize makes sure the chunk size meets the minimum.
func checkUploadChunkSize(cs fs.SizeSuffix) error {
	if cs >= minChunkSize {
		return nil
	}
	return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}

// setRoot changes the root of the Fs, splitting it into the bucket and
// directory parts.
func (f *Fs) setRoot(root string) {
	f.root = parsePath(root)
	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}

// parsePath parses a remote 'url' by stripping any leading and trailing
// slashes.
func parsePath(path string) string {
	return strings.Trim(path, "/")
}

// shouldRetry reports whether the request that produced resp/err should
// be retried, returning err as a convenience.
//
// NOTE(review): unlike shouldRetryNoReauth this ignores resp entirely
// and only consults fserrors.ShouldRetry - confirm whether HTTP status
// based retries (retryErrorCodes) were intended here too.
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	//if resp == nil {
	//	return false, nil
	//}

	return fserrors.ShouldRetry(err), err
}

// retryErrorCodes is a slice of HTTP status codes that we will retry.
var retryErrorCodes = []int{
	401, // Unauthorized (e.g. "Token has expired")
	408, // Request Timeout
	429, // Rate exceeded.
	500, // Get occasional 500 Internal Server Error
	503, // Service Unavailable
	504, // Gateway Time-out
}

// shouldRetryNoReauth returns a boolean as to whether this resp and err
// deserve to be retried.  It returns the err as a convenience.
// (The doc comment previously named it shouldRetryNoAuth.)
func (f *Fs) shouldRetryNoReauth(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	// For 429 or 503 errors look at the Retry-After: header and
	// set the retry appropriately, starting with a minimum of 1
	// second if it isn't set.
	if resp != nil && (resp.StatusCode == 429 || resp.StatusCode == 503) {
		// The Retry-After header is not parsed (the parsing code was
		// commented out), so always wait one second before retrying.
		const retryAfter = 1
		return true, pacer.RetryAfterError(err, time.Duration(retryAfter)*time.Second)
	}
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
	return bucket.Split(path.Join(f.root, rootRelativePath))
}

// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// Example output of `rclone lsd remote:` (listing the buckets):
// -1 2016-10-17 17:41:53        -1 1000files
// -1 2017-01-03 14:40:54        -1 2500files
// -1 2017-07-08 14:39:28        -1 4000files
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	bucket, directory := f.split(dir)
	if bucket == "" {
		// Listing the root lists the buckets
		if directory != "" {
			return nil, fs.ErrorListBucketRequired
		}
		return f.listBuckets(ctx)
	}

	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
}

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	bucket, directory := f.split(dir)
	list := walk.NewListRHelper(callback)
	// listR recursively lists one bucket, feeding each entry to the helper
	listR := func(bucket, directory, prefix string, addBucket bool) error {
		last := ""
		return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, false, false, func(remote string, object *api.File, isDirectory bool) error {
			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
			if err != nil {
				return err
			}
			return list.Add(entry)
		})
	}
	if bucket == "" {
		// No bucket given - list every bucket and recurse into each
		entries, err := f.listBuckets(ctx)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			err = list.Add(entry)
			if err != nil {
				return err
			}
			bucket := entry.Remote()
			err = listR(bucket, "", f.rootDirectory, true)
			if err != nil {
				return err
			}
			// bucket must be present if listing succeeded
			f.cache.MarkOK(bucket)
		}
	} else {
		err := listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
		if err != nil {
			return err
		}
		// bucket must be present if listing succeeded
		f.cache.MarkOK(bucket)
	}
	return list.Flush()
}

// listBucketFn is the callback invoked by listBucketsToFn for each bucket found.
type listBucketFn func(*api.Bucket) error

// listBucketsToFn lists all the buckets, calling fn on each one.
//
// It pages through the results 1000 at a time, caching each bucket's
// ID and Type in f._bucketID / f._bucketType as a side effect.
func (f *Fs) listBucketsToFn(ctx context.Context, fn listBucketFn) error {
	offset := int64(0)
	const pageLimit = int64(1000)

	// The ID/Type caches are rebuilt by this full listing, so reset
	// them once before paging. FIX: the original recreated the maps on
	// every page, discarding the entries from earlier pages.
	f.bucketIDMutex.Lock()
	f.bucketTypeMutex.Lock()
	f._bucketID = make(map[string]int, 1)
	f._bucketType = make(map[string]string, 1)
	f.bucketTypeMutex.Unlock()
	f.bucketIDMutex.Unlock()

	for {
		Path := "/api/" + defaultVersion + "/buckets/?limit=" +
			strconv.FormatInt(pageLimit, 10) + "&offset=" + strconv.FormatInt(offset, 10)
		var response api.ListBucketsResponse
		opts := f.HandlerAccesskeyHeader("GET", Path, 3600, nil, nil, nil, nil)

		err := f.pacer.Call(func() (bool, error) {
			resp, err := f.srv.CallJSON(ctx, &opts, nil, &response)
			return f.shouldRetry(ctx, resp, err)
		})
		if err != nil {
			return err
		}

		if response.Count == 0 {
			fs.Errorf(f, "还没有创建存储桶，请先创建桶！！")
		} else {
			// Cache the IDs and Types under both locks
			f.bucketIDMutex.Lock()
			f.bucketTypeMutex.Lock()
			for i := range response.Buckets {
				bucket := &response.Buckets[i]
				bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
				f.cache.MarkOK(bucket.Name)
				f._bucketID[bucket.Name] = bucket.ID
				f._bucketType[bucket.Name] = bucket.Type
			}
			f.bucketTypeMutex.Unlock()
			f.bucketIDMutex.Unlock()
			// Call the callback outside the locks
			for i := range response.Buckets {
				bucket := &response.Buckets[i]
				err = fn(bucket)
				if err != nil {
					return err
				}
			}
		}

		if response.Next == "" {
			return nil
		}
		// FIX: advance by exactly one page. The original grew the limit
		// each round and set offset to the old limit, which re-fetched
		// overlapping ranges and produced duplicate buckets from the
		// third page onwards.
		offset += pageLimit
	}
}

// getBucketID finds the numeric ID for the named bucket.
//
// It refreshes the bucket ID cache by listing all buckets (the listing
// populates f._bucketID as a side effect), then looks the name up.
func (f *Fs) getBucketID(ctx context.Context, bucket string) (bucketID string, err error) {
	err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
		// listBucketsToFn caches the IDs as a side effect
		return nil
	})
	if err != nil {
		return "", err
	}

	f.bucketIDMutex.Lock()
	id, found := f._bucketID[bucket]
	f.bucketIDMutex.Unlock()
	if !found {
		// FIX: the original converted the map's zero value with
		// strconv.Itoa (which never yields "") and then compared the
		// result against "", so a missing bucket was silently reported
		// as ID "0" instead of fs.ErrorDirNotFound.
		return "", fs.ErrorDirNotFound
	}
	return strconv.Itoa(id), nil
}

// errEndList is a sentinel used to end the list iteration now.
// listFn should return it to end the iteration with no errors.
var errEndList = errors.New("end list")

// listFn is called from list to handle each object or directory found.
type listFn func(remote string, object *api.File, isDirectory bool) error

// list lists the objects into the function supplied from
// the bucket and root supplied
//
// (bucket, directory) is the starting directory
//
// If prefix is set then it is removed from all file names
//
// If addBucket is set then it adds the bucket to the start of the
// remotes generated
//
// If recurse is set the function will recursively list
//
// If limit is > 0 then it limits to that many files (must be less
// than 1000)
//
// If hidden is set then it will list the hidden (deleted) files too.
//
// if findFile is set it will look for files called (bucket, directory)
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, limit int, hidden bool, findFile bool, fn listFn) error {
	if !findFile {
		if directory != "" {
			// Normalise the directory to end with exactly one "/"
			directory = strings.TrimRight(directory, "/")
			directory += "/"
		}
	}
	var request = api.ListFileNamesRequest{
		BucketName: bucket,
		DirPath:    directory,
	}
	offset := int64(0)
	page_limit := int64(2000)

LOOP:

	var opts rest.Opts
	size := int64(0)
	Path := ""
	if directory == "" {
		Path = "/api/" + defaultVersion + "/dir/" + bucket + "/" + directory + "?offset=" +
			strconv.FormatInt(offset, 10) + "&limit=" + strconv.FormatInt(page_limit, 10)
	} else {
		directory = strings.TrimRight(directory, "/")
		Path = "/api/" + defaultVersion + "/dir/" + bucket + "/" + directory + "/?offset=" +
			strconv.FormatInt(offset, 10) + "&limit=" + strconv.FormatInt(page_limit, 10)
	}

	opts = f.HandlerAccesskeyHeader("GET", Path, 3600, nil, &size, nil, nil)

	for {

		var response api.TraverseDirectory
		err := f.pacer.Call(func() (bool, error) {
			resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
			return f.shouldRetry(ctx, resp, err)
		})
		if err != nil {
			return err
		}

		for i := range response.Files {
			file := &response.Files[i]
			// Use the full path of the entry as its name
			file.Name = f.opt.Enc.ToStandardPath(file.AllFilePath)

			if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
				return nil
			}
			if !strings.HasPrefix(file.Name, prefix) {
				fs.Debugf(f, "Odd name received %q", file.Name)
				continue
			}

			remote := strings.TrimLeft(file.Name[len(prefix):], "/")

			var isDirectory bool
			if addBucket {
				remote = path.Join(bucket, remote)
			}
			// Entries with IsFileOrDir == false are directories
			if file.IsFileOrDir == false {
				isDirectory = true
			} else {
				isDirectory = false
			}
			err = fn(remote, file, isDirectory)
			if err != nil {
				if err == errEndList {
					return nil
				}
				return err
			}
		}

		// Stop when the server reports no further page.
		// NOTE(review): the offset/limit growth below mirrors
		// listBucketsToFn and looks like it re-fetches overlapping
		// ranges; confirm against the server's paging semantics.
		if response.Next == "" {
			break
		}
		offset = page_limit
		page_limit = page_limit + 2000
		goto LOOP
	}
	return nil
}

// decodeMetaData sets the metadata in the object from an api.File
//
// Sets
//  o.Name
//  o.modTime
//  o.size
//  o.MD5
func (o *Object) decodeMetaData(info *api.File) (err error) {
	if info == nil {
		return nil
	}
	return o.decodeMetaDataRaw(info.Name, info.Size, info.MD5, info.ContentType, o.fs.stringToTime(info.ReviseFileTime))
}

// Clean the MD5 (currently unused)
//
// Make sure it is lower case
//
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Some tools (e.g. Cyberduck) use this
//func cleanMD5(sha1 string) (out string) {
//	out = strings.ToLower(sha1)
//	const unverified = "unverified:"
//	if strings.HasPrefix(out, unverified) {
//		out = out[len(unverified):]
//	}
//	return out
//}

// decodeMetaDataRaw stores the individual metadata fields on the object.
//
// Sets
//  o.Name
//  o.MD5
//  o.mimeType
//  o.size
//  o.modTime
func (o *Object) decodeMetaDataRaw(Name string, size int64, MD5 string, mimeType string, revisefiletime time.Time) (err error) {
	o.Name = Name
	o.MD5 = MD5
	o.mimeType = mimeType
	o.size = size
	// The server's revise time is used as the modification time.
	o.modTime = revisefiletime
	return nil
}

// split returns the bucket and in-bucket path for this object, derived
// from its remote path via the Fs-level splitter.
func (o *Object) split() (bucket, bucketPath string) {
	return o.fs.split(o.remote)
}

// noNeedToEncode is a bitmap of characters which don't need % encoding
var noNeedToEncode [256]bool

// urlEncode percent-encodes in. Bytes whose entry in noNeedToEncode is
// true are copied through unchanged; every other byte is emitted as
// "%XX" with exactly two upper-case hex digits.
func urlEncode(in string) string {
	var out bytes.Buffer
	for i := 0; i < len(in); i++ {
		c := in[i]
		if noNeedToEncode[c] {
			_ = out.WriteByte(c)
		} else {
			// %02X zero-pads to two hex digits. The previous %2X verb
			// space-padded bytes < 0x10, producing invalid escapes such
			// as "% A" instead of "%0A".
			_, _ = out.WriteString(fmt.Sprintf("%%%02X", c))
		}
	}
	return out.String()
}

// getOrHead looks up the object's metadata on the remote.
//
// The method and options parameters are currently unused: the lookup is
// always performed as a GET through readRemoutFileInfo. Directories,
// 404 (file not found) and 400 (directory) responses are all reported
// as fs.ErrorObjectNotFound. The returned resp is always nil.
func (o *Object) getOrHead(ctx context.Context, method string, options []fs.OpenOption) (resp *http.Response, info *api.File, err error) {
	bucket, bucketPath := o.split()
	method = "GET" // NOTE(review): overrides the caller's method — confirm HEAD is never required
	resp, info, err = o.readRemoutFileInfo(ctx, bucket, bucketPath)
	if err != nil {
		return nil, nil, err
	}
	// The path resolved to a directory rather than a file.
	if info != nil && info.IsFileOrDir == false {
		return nil, nil, fs.ErrorObjectNotFound
	}
	// 404 for files, 400 for directories.
	if resp != nil && (resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusBadRequest) {
		return nil, nil, fs.ErrorObjectNotFound
	}
	return nil, info, nil
}

// getMetaDataListing gets the metadata from the object unconditionally from the listing
//
// Note that listing is a class C transaction which costs more than
// the B transaction used in getMetaData
func (o *Object) getMetaDataListing(ctx context.Context) (info *api.File, err error) {
	bucket, bucketPath := o.split()

	// Scan the listing for an exact match on the path; stop after the
	// first file entry inspected.
	match := func(remote string, object *api.File, isDirectory bool) error {
		if isDirectory {
			return nil
		}
		if remote == bucketPath {
			info = object
		}
		return errEndList // read only 1 item
	}
	err = o.fs.list(ctx, bucket, bucketPath, "", false, true, maxVersions, false, true, match)
	switch {
	case err == fs.ErrorDirNotFound:
		// A missing directory means the object can't exist either.
		return nil, fs.ErrorObjectNotFound
	case err != nil:
		return nil, err
	case info == nil:
		return nil, fs.ErrorObjectNotFound
	}
	return info, nil
}

// getMetaData gets the metadata from the object unconditionally.
//
// Thin wrapper around getOrHead which discards the HTTP response.
// Returns fs.ErrorObjectNotFound if the path is missing or a directory.
func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {

	_, info, err = o.getOrHead(ctx, "HEAD", nil)

	return info, err
}

// readMetaData fetches and decodes the object's metadata unless it has
// already been populated (o.Name non-empty acts as the "loaded" flag).
//
// Sets
//  o.Name
//  o.modTime
//  o.size
//  o.MD5
func (o *Object) readMetaData(ctx context.Context) (err error) {
	if o.Name != "" {
		// Metadata already present — nothing to do.
		return nil
	}
	var info *api.File
	if info, err = o.getMetaData(ctx); err != nil {
		return err
	}
	return o.decodeMetaData(info)
}

// newObjectWithInfo builds an Object for remote. When info is supplied
// (e.g. from a listing) it is decoded directly; otherwise the metadata
// is fetched from the remote.
//
// Returns fs.ErrorObjectNotFound if the object cannot be found.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.File) (fs.Object, error) {
	o := &Object{fs: f, remote: remote}
	var err error
	if info == nil {
		// No listing entry available — read metadata from the remote.
		err = o.readMetaData(ctx)
	} else {
		// Decode the metadata we already have.
		err = o.decodeMetaData(info)
	}
	if err != nil {
		return nil, err
	}
	return o, nil
}

// stringToTime converts a timestamp string from the iharbor API into a
// time.Time in the local zone.
//
// Timestamps look like "2022-04-08T17:47:30.162485+08:00", i.e. RFC 3339
// with a numeric zone offset and optional fractional seconds. The old
// layout hard-coded a literal "+08:00" suffix, so any timestamp with a
// different offset (or "Z") silently parsed to the zero time.
func (f *Fs) stringToTime(str string) time.Time {
	// Parse honouring the zone offset carried in the string so that
	// timestamps from servers in any time zone decode to the correct
	// instant; convert to local time for consistency with the old code.
	if t, err := time.Parse(time.RFC3339, str); err == nil {
		return t.Local()
	}
	// Fall back to the historical behaviour: interpret the string as
	// local time with a literal "+08:00" suffix. On failure the zero
	// time is returned, as before.
	const legacyLayout = "2006-01-02T15:04:05+08:00"
	t, _ := time.ParseInLocation(legacyLayout, str, time.Local)
	return t
}

// itemToDirEntry converts a list item into an fs.DirEntry (an fs.Dir
// for directories, an fs.Object for files).
//
// last is used to suppress duplicate consecutive file names: when the
// incoming remote equals *last the name is blanked before building the
// object.
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File, isDirectory bool, last *string) (fs.DirEntry, error) {
	if isDirectory {
		// Strip any leading "/" so that lsd can descend into
		// subdirectories.
		remote = strings.TrimLeft(remote, "/")

		d := fs.NewDir(remote, f.stringToTime(object.ReviseFileTime))
		return d, nil
	}
	// NOTE(review): the comparison below uses the untrimmed remote while
	// *last stores the trimmed form, so a name with a leading "/" can
	// never match its trimmed predecessor — confirm this asymmetry is
	// intended.
	if remote == *last {
		remote = ""
	} else {
		remote = strings.TrimLeft(remote, "/")
		*last = remote
	}

	o, err := f.newObjectWithInfo(ctx, remote, object)
	if err != nil {
		return nil, err
	}
	return o, nil
}

// listBuckets returns a DirEntry for every bucket visible to the user.
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
	// Each bucket becomes a top-level directory entry stamped with its
	// creation time.
	collect := func(bucket *api.Bucket) error {
		entries = append(entries, fs.NewDir(bucket.Name, f.stringToTime(bucket.CreatedTime)))
		return nil
	}
	if err = f.listBucketsToFn(ctx, collect); err != nil {
		return nil, err
	}
	return entries, nil
}

// listDir lists a single directory within a bucket.
//
// NOTE(review): the addBucket parameter is ignored — f.rootBucket == ""
// is passed to list instead; confirm that is intentional.
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
	last := ""
	// Accumulate every non-nil entry produced by the listing callback.
	collect := func(remote string, object *api.File, isDirectory bool) error {
		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
		if err != nil {
			return err
		}
		if entry != nil {
			entries = append(entries, entry)
		}
		return nil
	}
	if err = f.list(ctx, bucket, directory, prefix, f.rootBucket == "", false, 0, false, false, collect); err != nil {
		return nil, err
	}
	return entries, nil
}

// setBucketID caches the numeric ID for the given bucket name.
func (f *Fs) setBucketID(bucket string, ID int) {
	f.bucketIDMutex.Lock()
	defer f.bucketIDMutex.Unlock()
	f._bucketID[bucket] = ID
}

// makeBucket creates the bucket if it doesn't exist.
//
// Bucket creation is not supported through this backend: buckets must
// be created via the iharbor web console, so this always returns an
// error directing the user there.
func (f *Fs) makeBucket(ctx context.Context, bucket, bucketPath string) error {
	return errors.New("please go to the https://service.cstcloud.cn to create the bucket")
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// Simply delegates to Put, which handles buffering/chunking as needed.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

// -------------------------------------------------------------

// init registers the iharbor backend, its constructor, its interactive
// configuration function and its option set with the rclone registry.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "Iharbor",
		Description: "Iharbor Cloud Object Storage System (OSS) ",
		NewFs:       NewFs,
		// Interactive configuration state machine (see Config below).
		Config: Config,
		Options: []fs.Option{
			{
				Name: "endpoint",
				Help: "Endpoint for the service.",
			}, {
				Name: "apiVersion",
				Help: `Select the interface for uploading files,
v1 will not upload empty files. v2 is used by default`,
				Default: version,
			}, {
				Name: "upload_cutoff",
				Help: `Cutoff for switching to chunked upload.
This value should be set no larger than 4.657 GiB (== 5 GB).`,
				Default:  defaultUploadCutoff,
				Advanced: true,
			},
			//		{
			//			Name: "copy_cutoff",
			//			Help: `Cutoff for switching to multipart copy.
			//
			//Any files larger than this that need to be server-side copied will be
			//copied in chunks of this size.
			//
			//The minimum is 0 and the maximum is 4.6 GiB.`,
			//			Default:  largeFileCopyCutoff,
			//			Advanced: true,
			//		},
			{
				Name: "chunk_size",
				Help: `Upload chunk size.

When uploading large files, chunk the file into this size.

Must fit in memory. These chunks are buffered in memory and there
might a maximum of "--transfers" chunks in progress at once.

5,000,000 Bytes is the minimum size.`,
				Default:  defaultChunkSize,
				Advanced: true,
			},
			{
				Name: "disable_checksum",
				Help: `Disable checksums for large (> upload cutoff) files.

Normally rclone will calculate the MD5 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.`,
				Default:  false,
				Advanced: true,
			},
			{
				Name:     "block_concurrency",
				Help:     `Data block concurrent operations`,
				Default:  false,
				Advanced: true,
			},
			{
				Name:     "number_of_concurrent",
				Help:     "Number of concurrent file uploads.Default: 5",
				Default:  numberOfConcurrent,
				Advanced: true,
			},
			//{
			//	Name:     "memory_pool_flush_time",
			//	Default:  memoryPoolFlushTime,
			//	Advanced: true,
			//	Help: `How often internal memory buffer pools will be flushed.
			//Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
			//This option controls how often unused buffers will be removed from the pool.`,
			//}, {
			//	Name:     "memory_pool_use_mmap",
			//	Default:  memoryPoolUseMmap,
			//	Advanced: true,
			//	Help:     `Whether to use mmap buffers in internal memory pool.`,
			//},
			{
				Name:     config.ConfigEncoding,
				Help:     config.ConfigEncodingHelp,
				Advanced: true,
				// See: https://www.backblaze.com/b2/docs/files.html
				// Encode invalid UTF-8 bytes as json doesn't handle them properly.
				// FIXME: allow /, but not leading, trailing or double
				Default: (encoder.Display |
					encoder.EncodeBackSlash |
					encoder.EncodeInvalidUtf8),
			},
		},
	})

}

// Config drives the interactive configuration state machine, letting the
// user choose between shared-bucket-token, access-key and plain-token
// authentication. Exactly one credential set is left populated; the
// others are blanked out.
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
	// reset blanks out the given config keys so only the chosen
	// credential set remains stored.
	reset := func(keys ...string) {
		for _, key := range keys {
			m.Set(key, "")
		}
	}
	switch config.State {
	case "":
		// Initial state: pick the authentication type.
		return fs.ConfigChooseFixed("permission_selection", "User Credentials", "Authentication Type", []fs.OptionExample{{
			Value: "shared_bucket_login",
			Help:  "Use the shared bucket token to log in, the user can only have the operation permission of the shared bucket",
		}, {
			Value: "user_access_key_login",
			Help:  "Log in using the access keys AccessKeyID and SecretAccessKey",
		}, {
			Value: "user_token_login",
			Help:  "Log in using the token",
		}, {
			Value: "quit",
			Help:  "quit",
		},
		})
	case "permission_selection":
		// Jump to the state matching the user's choice.
		return fs.ConfigGoto(config.Result)
	case "shared_bucket_login":
		return fs.ConfigInput("shared_token", "SharedBucketToken", "Please enter the shared bucket token!")
	case "shared_token":
		m.Set("SharedBucketToken", config.Result)
		reset("token", "accessKeyID", "secretAccessKey")
		return fs.ConfigGoto("end")
	case "user_access_key_login":
		return fs.ConfigInput("user_accessKeyID", "accessKeyID", "access_key ")
	case "user_accessKeyID":
		m.Set("accessKeyID", config.Result)
		return fs.ConfigInput("user_secretAccessKey", "secretAccessKey", "secret_key")
	case "user_secretAccessKey":
		m.Set("secretAccessKey", config.Result)
		reset("SharedBucketToken", "token")
		return fs.ConfigGoto("end")
	case "user_token_login":
		return fs.ConfigInput("user_token", "token", "token ")
	case "user_token":
		m.Set("token", config.Result)
		reset("SharedBucketToken", "accessKeyID", "secretAccessKey")
		return fs.ConfigGoto("end")
	case "quit":
		return fs.ConfigGoto("end")
	case "end":
		// All the config flows end up here in case we need to carry on
		// with something.
		return nil, nil
	}
	return nil, fmt.Errorf("unknown state %q", config.State)
}

// NewFs constructs an Fs for the remote called name rooted at root,
// reading its options from mapper.
//
// If (bucket, directory) actually names an existing file, the parent Fs
// is returned together with fs.ErrorIsFile, per rclone convention.
func NewFs(ctx context.Context, name string, root string, mapper configmap.Mapper) (fs.Fs, error) {

	opt := new(Options)

	// Decode the remote's stored configuration into opt.
	err := configstruct.Set(mapper, opt)

	if err != nil {
		return nil, err
	}

	// The cutoff for switching to chunked upload can never be smaller
	// than one chunk.
	if opt.UploadCutoff < opt.ChunkSize {
		opt.UploadCutoff = opt.ChunkSize
		fs.Infof(nil, "Iharbor: raising upload cutoff to chunk size: %v", opt.UploadCutoff)
	}
	err = checkUploadCutoff(opt, opt.UploadCutoff) // validate the (possibly raised) cutoff
	if err != nil {
		return nil, fmt.Errorf("Iharbor: upload cutoff: %w", err)
	}
	err = checkUploadChunkSize(opt.ChunkSize) // chunk size must satisfy the defined minimum
	if err != nil {
		return nil, fmt.Errorf("Iharbor: chunk size: %w", err)
	}

	if opt.Endpoint == "" {
		opt.Endpoint = defaultEndpoint
	} else {
		// Strip trailing "/" from a user-supplied endpoint URL.
		// (The previous code also called strings.TrimRight and discarded
		// the result — that ineffectual statement has been removed.)
		if strings.HasSuffix(opt.Endpoint, "/") {
			opt.Endpoint = strings.TrimRight(opt.Endpoint, "/")
		}

	}
	if opt.Version == "" {
		opt.Version = version
	}

	ci := fs.GetConfig(ctx) // global config from the context

	// Assemble the Fs.
	f := &Fs{
		name:        name, // name of this remote
		opt:         *opt, // backend options
		ci:          ci,   // global config
		srv:         rest.NewClient(fshttp.NewClient(ctx)), // REST client for the service
		cache:       bucket.NewCache(),                     // caches bucket creation state
		_bucketID:   make(map[string]int, 1),
		_bucketType: make(map[string]string, 1),
		user:        opt.Username,
		pass:        opt.Password,
		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}

	f.setRoot(root) // normalise the root path

	// Declare the optional features this backend supports.
	f.features = (&fs.Features{
		ReadMimeType:      true,
		WriteMimeType:     true,
		BucketBased:       true,
		BucketBasedRootOK: true,
	}).Fill(ctx, f)
	// The "/api/" + defaultVersion suffix is currently not appended here.
	APIURL := f.opt.Endpoint
	// Install whichever credential was configured on the REST client.
	if opt.ShareToken != "" {
		f.info.Token = opt.ShareToken
		f.srv.SetRoot(APIURL).SetHeader("Authorization", "BucketToken "+f.info.Token)
	}
	if opt.Token != "" {
		f.info.Token = opt.Token
		f.srv.SetRoot(APIURL).SetHeader("Authorization", "Token "+f.info.Token)
	}

	if f.rootBucket != "" && f.rootDirectory != "" {

		// Check to see if the (bucket,directory) is actually an existing file.
		oldRoot := f.root
		newRoot, leaf := path.Split(oldRoot)
		// Probe with the parent as root: NewObject -> readMetaData ->
		// getMetaData -> getOrHead fetches the metadata.
		f.setRoot(newRoot)
		_, err := f.NewObject(ctx, leaf)
		if err != nil {
			// File doesn't exist so return old f with the original root
			// restored; listing will resolve it later.
			f.setRoot(oldRoot)
			return f, nil
		}
		// It is a file: return an fs pointing at the parent plus
		// fs.ErrorIsFile.
		return f, fs.ErrorIsFile
	}
	return f, nil
}

// authorizeAccount exchanges the configured username/password for an API
// token and installs it on the REST client. Safe for concurrent use —
// serialised by f.authMu.
func (f *Fs) authorizeAccount(ctx context.Context) error {
	f.authMu.Lock()
	defer f.authMu.Unlock()
	APIURL := f.opt.Endpoint + "/api/" + defaultVersion
	// The stored password is obscured — reveal it before use.
	reveal, err := obscure.Reveal(f.opt.Password)
	if err != nil {
		return err
	}
	request := api.InfoPerson{
		Username: f.opt.Username,
		Password: reveal,
	}

	opts := rest.Opts{
		Method:  "POST",
		Path:    "/api/" + defaultVersion + "/auth-token/",
		RootURL: f.opt.Endpoint,
	}
	var response api.AuthorizeAccountResponse
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
		if err != nil {
			// Log at debug level rather than printing to stdout (the
			// previous fmt.Println polluted rclone's output).
			fs.Debugf(f, "auth-token request failed: %v", err)
		}
		return f.shouldRetryNoReauth(ctx, resp, err)
	})
	if err != nil {
		return fmt.Errorf("failed to authenticate: %w", err)
	}
	// Remember the token and send it on every subsequent request.
	f.info.Token = response.Token.Key
	f.srv.SetRoot(APIURL).SetHeader("Authorization", "Token "+response.Token.Key)
	return nil

}

// jsonBodyStruct is the payload signed by
// AccessKeyIDSecretAccessKeyHandler: the request path, the HTTP method
// and a Unix-seconds expiry deadline.
type jsonBodyStruct struct {
	PathOfURL string `json:"path_of_url"`
	Method    string `json:"method"`
	Deadline  int64  `json:"deadline"`
}

// AccessKeyIDSecretAccessKeyHandler builds an "evhb-auth" Authorization
// header value for the given request.
//
// The signed payload is a base64url-encoded JSON document carrying the
// request path, method and an expiry deadline (now + timedelta seconds),
// authenticated with HMAC-SHA1 keyed by the secret access key.
func (f *Fs) AccessKeyIDSecretAccessKeyHandler(accesskey string, secretaccesskey string,
	method string, path_of_url string, timedelta int64) string {
	// The signature is valid until now + timedelta seconds.
	deadline := time.Now().Unix() + timedelta

	payload, _ := json.Marshal(jsonBodyStruct{
		PathOfURL: path_of_url,
		Method:    method,
		Deadline:  deadline,
	})
	encoded := base64.URLEncoding.EncodeToString(payload)

	mac := hmac.New(sha1.New, []byte(secretaccesskey))
	mac.Write([]byte(encoded))
	signature := base64.URLEncoding.EncodeToString(mac.Sum(nil))

	return fmt.Sprintf("%s %s:%s:%s", "evhb-auth", accesskey, signature, encoded)
}

// MapToJson renders query parameters as a URL query string, e.g.
// "?key1=val1&key2=val2". (Despite the name, the output is a query
// string, not JSON.) Only the first value of each key is used; keys with
// no values are skipped; an empty map yields "".
//
// NOTE(review): values are not URL-escaped and Go map iteration order is
// random, so multi-key output order varies between calls — confirm the
// server accepts parameters in any order.
func MapToJson(param map[string][]string) string {
	var b strings.Builder
	b.WriteByte('?')
	for key, value := range param {
		// Skip keys with no values instead of panicking on value[0].
		if len(value) == 0 {
			continue
		}
		b.WriteString(key)
		b.WriteByte('=')
		b.WriteString(value[0])
		b.WriteByte('&')
	}
	// Drop the trailing '&' (or the lone '?' when nothing was written).
	s := b.String()
	return s[:len(s)-1]
}

// HandlerAccesskeyHeader builds the rest.Opts for a request, signing it
// with the configured access-key pair when one is present; otherwise the
// request is passed through unsigned.
func (f *Fs) HandlerAccesskeyHeader(method, path_of_url string, timedelta int64, parameters map[string][]string,
	contentLength *int64, extraheaders map[string]string, body io.Reader) rest.Opts {
	// No access-key pair: let the rest client add the query parameters
	// itself and send the request without an Authorization header.
	if f.opt.AccessKeyID == "" || f.opt.SecretAccessKey == "" {
		return rest.Opts{
			Method:        method,
			Path:          path_of_url,
			ExtraHeaders:  extraheaders,
			Parameters:    parameters,
			ContentLength: contentLength,
			Body:          body,
		}
	}
	// The signature covers the full path including the query string, so
	// fold any parameters into the path before signing.
	if parameters != nil {
		path_of_url += MapToJson(parameters)
	}
	auth := f.AccessKeyIDSecretAccessKeyHandler(f.opt.AccessKeyID, f.opt.SecretAccessKey, method, path_of_url, timedelta)
	if extraheaders == nil {
		extraheaders = map[string]string{"Authorization": auth}
	} else {
		extraheaders["Authorization"] = auth
	}
	return rest.Opts{
		Method:        method,
		Path:          path_of_url,
		RootURL:       f.opt.Endpoint,
		ExtraHeaders:  extraheaders,
		ContentLength: contentLength,
		Body:          body,
	}
}

// Check the interfaces are satisfied at compile time.
var (
	_ fs.Fs     = &Fs{}
	_ fs.Purger = &Fs{}
	//_ fs.Copier       = &Fs{}
	_ fs.PutStreamer  = &Fs{}
	_ fs.CleanUpper   = &Fs{}
	_ fs.ListRer      = &Fs{}
	_ fs.PublicLinker = &Fs{}
	_ fs.Object       = &Object{}
	_ fs.MimeTyper    = &Object{}
	_ fs.IDer         = &Object{}
)
