#!/usr/bin/python
# -*- coding: UTF-8 -*-
from http import HTTPStatus

# Created by hadoop on 17-8-9.

import dashscope
import os
from dashscope import TextEmbedding
from dashvector import Client, Doc

# NOTE(review): API keys are hard-coded below — a serious security risk if this
# file is ever shared or committed to version control. Prefer loading them from
# the environment / a .env file (e.g. python-dotenv's load_dotenv(), as the
# commented-out call below suggests) and rotating the exposed keys.
# load_dotenv()
dashscope.api_key = 'sk-73d63b18321142d4a54de172866d2573'

# initialize the DashVector client used for indexing and searching embeddings
dashvector_client = Client(api_key='sk-9k1sIQJvHsfOV4BmcLDFKeYEcGQ3H97172D44E36311EEBB3F02F685E693CB',
                           endpoint='vrs-cn-g4t3nm78h0006h.dashvector.cn-hangzhou.aliyuncs.com')

# name of the DashVector collection holding the corpus chunks
# (alternative corpus: collection_name = 'news_embeddings')
collection_name = 'tianlongbabu'

# drop any existing collection with this name so indexing starts from a clean slate
dashvector_client.delete(collection_name)

# create a fresh collection with vector dimension 1536 — presumably the output
# size of DashScope's text_embedding_v1 model; TODO confirm against model docs
rsp = dashvector_client.create(collection_name, 1536)
print("成功创建collection " + collection_name)
collection = dashvector_client.get(collection_name)


def prepare_data_from_dir(path, size):
    """Yield batches of documents read from a directory, one doc per file.

    Each regular file directly under *path* becomes one document (its full
    text); documents are yielded in lists of at most *size* entries so each
    batch stays within the embedding API's batch limit.

    Args:
        path: directory to scan (non-recursive).
        size: maximum number of documents per yielded batch.

    Yields:
        list[str]: up to *size* document strings per batch.
    """
    batch_docs = []
    for name in os.listdir(path):
        # os.path.join is portable, unlike the original `path + '/' + name`
        full_path = os.path.join(path, name)
        # skip sub-directories and other non-regular entries instead of
        # crashing with IsADirectoryError when open() hits them
        if not os.path.isfile(full_path):
            continue
        with open(full_path, 'r', encoding='utf-8') as f:
            batch_docs.append(f.read())
        if len(batch_docs) == size:
            yield batch_docs[:]  # hand out a copy; the buffer is reused
            batch_docs.clear()

    # flush the final, possibly partial batch
    if batch_docs:
        yield batch_docs


def prepare_data_from_file(path, size, chunk_size=12):
    """Read *path* line by line and group non-empty lines into document chunks.

    Every *chunk_size* non-empty lines form one document; documents are
    yielded in batches of at most *size* entries so each batch stays within
    the embedding API's batch limit. Blank lines are skipped and do not
    count toward a chunk.

    Args:
        path: UTF-8 text file to read.
        size: maximum number of documents per yielded batch.
        chunk_size: number of non-empty lines per document (default 12,
            matching the original hard-coded value).

    Yields:
        list[str]: up to *size* document strings per batch.
    """
    batch_docs = []
    doc = ''
    count = 0
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            if line.strip() == '':
                continue  # blank lines neither join nor terminate a chunk
            doc += line
            count += 1
            if count == chunk_size:
                batch_docs.append(doc)
                doc = ''
                count = 0
                if len(batch_docs) == size:
                    yield batch_docs[:]  # hand out a copy; the buffer is reused
                    batch_docs.clear()

    # BUGFIX: the original silently dropped a trailing partial chunk
    # (fewer than chunk_size non-empty lines at EOF), losing corpus text.
    if doc:
        batch_docs.append(doc)
    # flush the final, possibly partial batch
    if batch_docs:
        yield batch_docs


def generate_embeddings(docs):
    """Embed *docs* with DashScope's text_embedding_v1 model.

    Returns a list of embedding vectors when *docs* is a list, otherwise
    the single embedding for the one input document.
    """
    response = TextEmbedding.call(model=TextEmbedding.Models.text_embedding_v1,
                                  input=docs)
    vectors = []
    for record in response.output['embeddings']:
        vectors.append(record['embedding'])
    if isinstance(docs, list):
        return vectors
    return vectors[0]


def upsert_and_index(dir_name_param):
    """Read the corpus file in batches, embed each batch, and upsert into DashVector.

    Args:
        dir_name_param: path of the corpus text file to index
            (despite the name, this is a file path, not a directory —
            the directory variant is commented out below).
    """
    global collection
    next_id = 0
    # embedding API max batch size
    batch_size = 4
    # FIX: iterate the generator lazily — the original wrapped it in list(),
    # which materialized the whole corpus in memory before any upsert.
    # for batch in prepare_data_from_dir(dir_name_param, batch_size):  # directory variant
    for batch in prepare_data_from_file(dir_name_param, batch_size):  # file variant
        ids = [next_id + i for i in range(len(batch))]
        next_id += len(batch)
        # generate embeddings from the raw docs
        vectors = generate_embeddings(batch)
        # upsert this batch into the cloud vector store
        # (comprehension variables renamed: the original reused `start`,
        # shadowing the outer counter)
        ret = collection.upsert(
            [
                Doc(id=str(doc_id), vector=vec, fields={"raw": doc})
                for doc_id, doc, vec in zip(ids, batch, vectors)
            ]
        )
        print(ret)

    print(collection.stats())


def search_relevant_context(question_param, top_k=1, client=dashvector_client):
    """Recall the *top_k* chunks most similar to *question_param* from DashVector.

    Note: *client* is kept for signature compatibility but is not used;
    the module-level `collection` handle is queried instead.

    Returns:
        The recalled chunks' raw text, concatenated into one string.
    """
    global collection

    # embed the question, then query the vector store for nearest chunks
    query_vector = generate_embeddings(question_param)
    result = collection.query(query_vector, output_fields=['raw'],
                              topk=top_k)
    print(result)

    raw_chunks = [match.fields['raw'] for match in result.output]
    return "".join(raw_chunks)


if __name__ == '__main__':
    # corpus to index; one document is produced per 12 non-empty lines
    # (directory variant: '/home/hadoop/.local/bin/my-project/CEC-Corpus/raw corpus/allSourceText')
    corpus_path = "/home/hadoop/IdeaProjects/语料/tianlongbabu_jinyong.txt"
    upsert_and_index(corpus_path)

    # example retrieval (disabled):
    # question = '清华博士发生了什么？'
    # print(search_relevant_context(question, top_k=1))
