import pandas as pd
# import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "1,2"
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from knowledge1.models import Base, Project, Config, Detail, Label, Source
import uuid
import time
from datetime import datetime
import json
from knowledge1.project_file.conn_milvus import MilvusDB
from tqdm import tqdm
import copy
from text_embedding import chatlaw_text2vec_encode, m3e_encode, bge_encode, text2vec_encode
from advanced_module import SingleReranker
from pypinyin import pinyin, Style
import numpy as np
from django.db.models import Q

from logging import getLogger
logger = getLogger("log")

import yaml
# Search-pipeline parameters (e.g. rerank switch); the path keeps the
# project's original 'pipline' spelling.
with open("configs/search_pipline.yaml", "r", encoding="utf-8") as f:
    search_pipline_paras = yaml.safe_load(f)

# Default embedding model per legal knowledge domain (criminal/civil/general).
base_emb_dict = {'刑法': 'chatlaw', '民法': 'chatlaw', '法律': 'chatlaw'}
# Output vector dimension of each supported embedding model.
embedding_dim = {'chatlaw': 768, 'm3e-base': 768, 'bge-base': 768, 'text2vec_large': 1024}
# embedding_dim = {obj.name: int(obj.dims) for obj in Config.objects.all()}

# Fallback embedding name used when a requested one is unknown.
default_emb = 'm3e-base'

# Maps embedding name -> encoding function (text -> vector).
embedding_model_dict = {
    "chatlaw": chatlaw_text2vec_encode,
    "bge-base": bge_encode,
    "m3e-base": m3e_encode,
    "text2vec_large": text2vec_encode
}


def choose_model(embedding_type):
    """Return the encoder registered for *embedding_type*, defaulting to m3e."""
    try:
        return embedding_model_dict[embedding_type]
    except KeyError:
        return m3e_encode


def init_verctor_store():
    """(Re)build the global Milvus collection registry.

    Populates the module-level ``db`` dict with one IP-metric and one
    L2-metric MilvusDB handle per active (project, embedding-dim) pair,
    keyed "IP_<project>_<dim>" / "L2_<project>_<dim>", and refreshes
    ``global_project_name_list``.

    NOTE(review): name keeps the original 'verctor' typo because callers
    elsewhere in the project use it.
    """
    global db, global_project_name_list
    db = {}
    # Distinct (project, embedding) pairs among active bases (status=1).
    collection_dict = Base.objects.filter(status=1).values('project__name', 'embedding__name').distinct()
    global_project_name_list = list(set(
        str(item['project__name']) + '_' + str(embedding_dim[item['embedding__name']])
        for item in collection_dict
    ))

    for collection_name in global_project_name_list:
        # Collection names end with the embedding dimension, e.g. "proj_768".
        dim = int(collection_name.split('_')[-1])
        db_IP = MilvusDB(collection_name=collection_name + '_IP', index_type='IP', dimension=dim)
        db_L2 = MilvusDB(collection_name=collection_name + '_L2', index_type='L2', dimension=dim)
        db.update({f'IP_{collection_name}': db_IP, f'L2_{collection_name}': db_L2})
    # Log the collections being served (was a bare print).
    logger.info(global_project_name_list)


def sort_by_pinyin(s: str):
    """Sort the comma-separated items of *s* in pinyin order.

    Each item is keyed by its per-character pinyin in TONE3 style; see
    https://pypinyin.readthedocs.io/zh_CN/master/api.html#pypinyin.Style.TONE3
    """
    items = s.split(',')
    sorted_items = sorted(items, key=lambda word: [pinyin(ch, style=Style.TONE3) for ch in word])
    return ','.join(sorted_items)


# Registry of MilvusDB handles, keyed "<metric>_<project>_<dim>"
# (e.g. "IP_proj_768"); filled once at import time.
db = {}
init_verctor_store()


class SqlStorage:
    """Persists knowledge rows from a pandas DataFrame into the MySQL
    tables (Config / Project / Base / Source / Detail) via the Django ORM."""

    def insert_multi_dataframe(self, tmp_data, tqdm_bar, comment, source2id):
        """Insert the rows of *tmp_data* into the Detail table in batches.

        Args:
            tmp_data: DataFrame whose rows all share one embedding name;
                expected columns: uid, text, detail, embedding, meta_data,
                source, base_name, base_type, project_name, parent_id.
            tqdm_bar: progress bar, advanced once per processed row.
            comment: optional comment copied onto each new Detail row.
            source2id: optional {source_name: source_uid} mapping; when
                given, Source rows are created with an explicit uid.

        Returns:
            A DataFrame like *tmp_data* whose 'uid' column holds the stored
            uid per row, or the marker string 'exist' for rows that already
            existed (same base+text+embedding, or duplicate uid).
        """
        batch_size = 2000
        insert_data_list = []

        # All rows share a single embedding model (the caller groups by it).
        embedding = tmp_data.embedding.to_list()[0]
        embedding_dict = {'name': embedding, 'dims': embedding_dim.get(embedding)}
        tmp_embedding, created = Config.objects.get_or_create(**embedding_dict)
        if created:
            # Fixed idiom: original used `if not _: pass / else: ...`;
            # log only when the Config row was newly created.
            logger.info(f'未存在{embedding_dict["name"]}')

        res_df = []
        for (tmp_base_name, tmp_base_type, source, project_name), tmp_df in tmp_data.groupby(['base_name',
                                                                                              'base_type',
                                                                                              'source',
                                                                                              'project_name'
                                                                                              ]):
            # Ensure the project exists and is marked active.
            project_res, _ = Project.objects.get_or_create(name=project_name)
            Project.objects.filter(name=project_name).update(status=1)

            # Reactivate / update an existing base, or create a new one.
            base_dict = {'name': tmp_base_name, 'embedding': tmp_embedding, 'project': project_res}
            base_qs = Base.objects.filter(name=tmp_base_name, project__name=project_name)
            if base_qs.exists():
                base = base_qs[0]
                if base.status == 0:
                    base.status = 1
                if tmp_base_type and tmp_base_type != "":
                    base.category = tmp_base_type
                base.save()
            else:
                if tmp_base_type and tmp_base_type != "":
                    base_dict["category"] = tmp_base_type
                base, _ = Base.objects.get_or_create(**base_dict)

            if source2id:
                source_dict = {'name': source, 'uid': source2id[source]}
            else:
                source_dict = {'name': source}
            source_res, _ = Source.objects.get_or_create(**source_dict)

            uid_list = []
            for idx, col in tmp_df.iterrows():
                # Skip rows that already exist, either by content
                # (base + text + embedding) or by uid.
                query = Detail.objects.filter(base=base,
                                              text=col['text'],
                                              embedding=tmp_embedding,
                                              status=1)
                query_uid = Detail.objects.filter(uid=col['uid'],
                                                  status=1)
                tmp_uid = col['uid']
                if not query.exists() and not query_uid.exists():
                    new_detail = Detail(uid=col['uid'],
                                        base=base,
                                        text=col['text'],
                                        detail_text=str(col['detail']),
                                        source=source_res,
                                        embedding=tmp_embedding,
                                        meta=col['meta_data'],
                                        parent_id=col['parent_id'])
                    if comment:
                        new_detail.comment = comment
                    insert_data_list.append(new_detail)
                else:
                    # Mark the row so the caller skips its vector insert.
                    tmp_uid = 'exist'

                uid_list.append(tmp_uid)
                tqdm_bar.update()
                # Flush a full batch.
                if len(insert_data_list) >= batch_size:
                    Detail.objects.bulk_create(insert_data_list)
                    logger.info(f'插入{batch_size}条数据')
                    insert_data_list = []

            # Flush the remainder for this group.
            if insert_data_list:
                Detail.objects.bulk_create(insert_data_list)
                logger.info(f'插入{len(insert_data_list)}条数据')
                insert_data_list = []

            tmp_df['uid'] = uid_list
            res_df.append(copy.deepcopy(tmp_df))

        return pd.concat(res_df)


@csrf_exempt
def get_label(request):
    """Create Label rows for a project/source pair.

    POST fields (JSON-encoded): source, label (comma-separated, Chinese
    commas accepted), project. Returns a JsonResponse with the count of
    newly created labels, or an error payload.
    """
    start_time = time.time()
    count = 0
    try:
        source = json.loads(request.POST.get('source', None))
        raw_labels = json.loads(request.POST.get('label', None))
        # Normalize Chinese commas, then split into individual labels.
        labels = raw_labels.replace('，', ',').split(',')
        project_name = json.loads(request.POST.get('project', None))

        project_res, _ = Project.objects.get_or_create(name=project_name)
        source_res, _ = Source.objects.get_or_create(name=source)

        for label in labels:
            _, created = Label.objects.get_or_create(
                name=label, project=project_res, source=source_res)
            if created:
                count += 1
        logger.info(f"插入{count}条标签")
    except Exception as e:
        end_time = time.time()
        return JsonResponse({
            'error': f'{e}',
            'time': str(end_time - start_time)
        })
    end_time = time.time()
    return JsonResponse({
        'result': f'共{len(labels)}条标签,其中有效数据{count}条, 成功写入数据库{count}条数据',
        'time': str(end_time-start_time)
    })

@csrf_exempt
def get_data(request):
    """Ingest knowledge rows: persist to MySQL, then embed and write to Milvus.

    POST fields (JSON-encoded lists aligned by index unless noted): text,
    text_uid, embedding, meta_data, source, source_id, detail_text, base,
    base_type, project, sim_text (per-row similar questions),
    update_answer, is_normalize, comment (plain string).

    Returns a JsonResponse summarizing how many rows were received and how
    many were newly inserted.
    """
    # global history
    start_time = time.time()
    sql_manager = SqlStorage()

    text = json.loads(request.POST.get('text', None))
    uid = request.POST.get('text_uid', None)
    if uid:
        uid = json.loads(uid)
        uid = [str(u) for u in uid]
    # NOTE(review): assert is stripped under `python -O`; input validation
    # should raise an explicit error instead.
    assert type(text) == list, 'text type must be str or list, if you choose multi data, type text must be list'
    embedding_list = json.loads(request.POST.get('embedding', None))
    meta_data = json.loads(request.POST.get('meta_data', None))
    source = json.loads(request.POST.get('source', None))
    source_id = request.POST.get('source_id', None)
    if source_id:
        source_id = json.loads(source_id)
        # Pair each source name with its external id, positionally.
        source2id = {s: s_id for s, s_id in zip(source, source_id)}
    else:
        source2id = None
    detail = json.loads(request.POST.get('detail_text', None))
    base_name = json.loads(request.POST.get('base', None))
    base_type = json.loads(request.POST.get('base_type', None))
    project_name = json.loads(request.POST.get('project', None))
    sim_text = request.POST.get('sim_text', None)
    if sim_text:
        sim_text = json.loads(sim_text)
        # Plain-string similar questions are normalized into dicts carrying
        # a freshly generated extend_id.
        if not isinstance(sim_text[0][0], dict):
            valid_sim_text = []
            for s in sim_text:
                temp = []
                for i in range(len(s)):
                    extend_id = datetime.now().strftime('%Y%m%d%H%M%S') + uuid.uuid4().hex
                    temp.append({"extend_id": extend_id, "extend_question": s[i]})

                valid_sim_text.append(temp)
            sim_text = valid_sim_text
    update_answer = request.POST.get('update_answer', False)
    is_normalize = request.POST.get('is_normalize', True)
    # POST values arrive as strings; only the literal "false" disables
    # normalization.
    if is_normalize == "false":
        is_normalize = False
    comment = request.POST.get('comment', None)

    if update_answer:
        # Update mode: rewrite the answer (detail_text) of existing main
        # questions and of their child (similar) questions.
        num = 0
        for i in range(len(text)):
            base = Base.objects.filter(name=base_name[i], project__name=project_name[i])
            main_query = Detail.objects.filter(text=text[i], base=base[0])
            if main_query.exists():
                for k in range(len(main_query)):
                    main_question = main_query[k]
                    uid = main_question.uid
                    answer = main_question.detail_text

                    main_question.detail_text = detail[i]
                    main_question.save_origin()
                    num += 1

                    # Child questions share the old answer and reference the
                    # main question via parent_id.
                    child_question_qs = Detail.objects.filter(detail_text=answer, parent_id=uid)
                    num += len(child_question_qs)
                    child_question_qs.update(detail_text=detail[i])
        # end_time = time.time()
        # return JsonResponse({
        #     'answer': f'共修改了{num}条数据。',
        #     'time': str(end_time-start_time)
        # })
    if not uid:
        # No uids supplied: mint timestamp+uuid identifiers.
        uid = [datetime.now().strftime('%Y%m%d%H%M%S') + uuid.uuid4().hex for _ in range(len(text))]
    parent_id = ['']*len(text)
    # Append each similar question as an extra row linked to its main
    # question through parent_id. range() is snapshotted up-front, so the
    # loop never walks into the rows appended here.
    for i in range(len(text)):
        if sim_text and sim_text[i]:
            for item in sim_text[i]:
                text.append(item["extend_question"])
                embedding_list.append(embedding_list[i])
                meta_data.append(meta_data[i])
                source.append(source[i])
                detail.append(detail[i])
                base_name.append(base_name[i])
                base_type.append(base_type[i])
                project_name.append(project_name[i])
                uid.append(str(item["extend_id"]))
                parent_id.append(uid[i])

    data = pd.DataFrame({
        'uid': uid,
        'text': text,
        'detail': detail,
        'embedding': embedding_list,
        'meta_data': meta_data,
        'source': source,
        'base_name': base_name,
        'parent_id':parent_id,
        'base_type': base_type,
        'project_name': project_name
    })
    # De-duplicate rows by content/embedding/base.
    data = data.drop_duplicates(subset=["text", "embedding", "base_name"])

    disable_insert = 0  # NOTE(review): never incremented — the except that used it is commented out below.
    to_insert_data = 0
    # NOTE(review): the loop targets shadow the request-level base_name /
    # project_name lists from this point on.
    for (emb, base_name, project_name), tmp_data in data.groupby(['embedding', 'base_name','project_name']):
        # Unknown embedding: fall back to the default model.
        if emb not in embedding_model_dict.keys():
            print(f'embedding:{emb}未录入,采用默认的chatlaw')
            emb = 'chatlaw'
            tmp_data['embedding'] = 'chatlaw'

        tmp_result_list = []
        tmp_count = len(tmp_data)
        # Row-by-row insert into MySQL.
        step_bar = tqdm(range(tmp_count),
                        desc=f'insert data with embedding {emb} to mysql database',
                        )

        res_df = sql_manager.insert_multi_dataframe(tmp_data, step_bar, comment, source2id)
        tmp_data = res_df

        uid_list = tmp_data['uid'].to_list()

        # Map DataFrame index -> stored uid ('exist' marks duplicates).
        uid_v = {tmp_data.index.tolist()[i]: uid_list[i] for i in range(len(uid_list))}
        # Resolve the encoder and its output dimension.
        embedding_method = embedding_model_dict.get(emb)
        dim = embedding_dim[emb]
        # Vector collection name: "<project>_<dim>".
        vector_collection_name = project_name+'_'+str(dim)
        if vector_collection_name not in global_project_name_list:
            # First data for this project: rebuild the collection registry.
            init_verctor_store()

        # Keep only the rows that were newly inserted into MySQL.
        inser_uid_dict = {k: v for k, v in uid_v.items() if v != 'exist'}
        not_exist_data = list(inser_uid_dict.keys())
        insert_uid = list(inser_uid_dict.values())
        to_insert_data += len(not_exist_data)

        text_v = tmp_data.loc[not_exist_data, 'text'].to_list()

        if len(text_v)>0:
            # Truncate long texts before embedding.
            text_v = [x[:1000] for x in text_v]
            vector = []
            for t in text_v:
                e = embedding_method(t, is_normalize)
                vector.append(e)
            vector = np.array(vector)
            fearture_v = [list(x.astype('float32')) for x in vector]
            source_v = tmp_data.loc[not_exist_data,'source'].to_list()
            meta_v = [str(x) for x in tmp_data.loc[not_exist_data,'meta_data'].to_list()]
            embedding_v = tmp_data.loc[not_exist_data,'embedding'].to_list()
            project_v = tmp_data.loc[not_exist_data,'project_name'].to_list()
            base_v = tmp_data.loc[not_exist_data,'base_name'].to_list()
            time_v = [datetime.now().strftime('%Y-%m-%d %H-%M-%S')]*len(not_exist_data)

            # Column order presumably matches the Milvus collection schema
            # — verify against MilvusDB.insert_data.
            tmp_result_list.extend(
                [insert_uid, text_v, source_v, meta_v,
                 # label_v,
                 fearture_v, embedding_v, project_v, base_v, time_v])

            total_count = tmp_data.shape[0]

            batch_size = 2000

            # Batched insert into both the IP and the L2 collection.
            for i in range(0, total_count, batch_size):
                # Grab end index
                end = min(i + batch_size, total_count)
                # Convert dict to list of lists batch for insertion
                insert_list = [x[i:end] for x in tmp_result_list]
                # Insert into the collection.

                # try:
                # Write to the vector database.
                vector_db_name = ['IP_'+vector_collection_name, 'L2_'+vector_collection_name]
                for db_name in vector_db_name:
                    tmp_db = db[db_name]
                    tmp_db.insert_data(data=insert_list, partition_name=base_name)
                    logger.info(f'index: {db_name} 插入 {len(insert_list[0])} 条至 {base_name}')

                # except Exception as e:
                #     disable_insert += len(insert_list[0])
    end_time = time.time()
    return JsonResponse({
        'answer': f'共{data.shape[0]}条数据,其中有效数据{to_insert_data}条。',
        'time': str(end_time-start_time)
    })
    

@csrf_exempt
def search_data(request):
    """Vector-search a project's knowledge bases for *question*.

    POST fields: question, embedding (optional model name), similarity
    ('L2' or 'IP'), label (optional comma-separated filter on sources),
    project (str or JSON list), base (JSON list, or list-of-lists when
    project is a list), topk.

    Two paths: with labels, hits are restricted to uids of the labeled
    sources and sorted by distance; without labels, an optional reranker
    (configured in search_pipline.yaml) reorders the candidates.
    """
    # print(111)
    question = request.POST.get('question', None)
    embedding = request.POST.get('embedding', None)
    similarity = request.POST.get('similarity', None)
    labels = request.POST.get('label', None)
    # Split on commas.
    # label_sort = sort_by_pinyin(label)
    # todo: project and base should support both str and list
    # Supported formats:
    # pro [base1, base2, base3]
    # [pro1, pro2] [[base1, base2], [base3, base4]]

    project_name = request.POST.get('project', None)
    base_name = request.POST.get('base', None)
    topk = int(request.POST.get('topk', 3))

    # Legal-domain projects use unnormalized embeddings.
    if "legal" in project_name:
        is_normalize = False
    else:
        is_normalize = True

    start_time = time.time()
    if project_name is None:
        end_time = time.time()
        return JsonResponse({
            'error': '项目名称不能为空',
            'time': str(end_time - start_time),
        })
    try:
        # A JSON-decodable value means multi-project search was requested.
        project_name_list = json.loads(project_name)
        multi_project = True
    except:
        project_name_list = [project_name]
        multi_project = False

    if base_name is None:
        end_time = time.time()
        return JsonResponse({
            'error': '知识库名称不能为空',
            'time': str(end_time - start_time),
        })
    try:
        base_name_list = json.loads(base_name)
        # Shape must match the project parameter: list-of-lists for
        # multi-project, flat list otherwise.
        if multi_project and isinstance(base_name_list[0], list):
            pass
        elif (not multi_project) and isinstance(base_name_list, list):
            pass
        else:
            end_time = time.time()
            return JsonResponse({
                'error': '知识库名称参数传入错误',
                'time': str(end_time - start_time),
            })
    except:
        end_time = time.time()
        return JsonResponse({
            'error': '知识库名称应以列表形式传入',
            'time': str(end_time - start_time),
        })

        # NOTE(review): unreachable — this follows the return above.
        assert type(base_name) == list, \
            'text type must be str or list, if you choose multi data, type text must be list'



    # NOTE(review): filters on the raw project_name string, not
    # project_name_list — multi-project requests likely match nothing here.
    base_name_dict = Base.objects.filter(project__name=project_name, status=1).values('name', 'embedding__name')
    if base_name:
        all_base_name_list = [x['name'] for x in base_name_dict]
        # NOTE(review): base_name is still the raw JSON string here, so
        # set(base_name) is a set of characters — base_name_list was
        # probably intended; verify against callers.
        if not set(all_base_name_list).intersection(set(base_name)):
            end_time = time.time()

            return JsonResponse({
                # 'source_label':source_name_list,
                'error': f'知识库{base_name}未查询到',
                'time': str(end_time - start_time),
            })

    # All embedding names used by this project's bases.
    emb_list = [item['embedding__name'] for item in base_name_dict]

    # todo:
    project_dim_name_list = list(set([project_name+'_'+str(embedding_dim[item]) for item in emb_list]))
    # No intersection with the live collections => project unknown.
    if not set(project_dim_name_list).intersection(set(global_project_name_list)):
        end_time = time.time()
        return JsonResponse({
            # 'source_label':source_name_list,
            'error': f'项目{project_name}未查询到',
            'time': str(end_time - start_time),
        })

    source_id_list = []

    if labels:
        # Label path: restrict hits to details from the labeled sources.
        labels = list(labels.replace('，',',').split(','))
        for label in labels:
            tmp_source_dict = Label.objects.filter(name=label, project__name=project_name, status=1).values('source', 'source__name')
            source_id_list.extend([x['source'] for x in tmp_source_dict])
        detail_id_dict = Detail.objects.filter(source__in=source_id_list, status=1).values('uid')
        detail_id_list = [x['uid'] for x in detail_id_dict]
        comment = ''

        # A specific embedding was requested.
        if embedding is not None:
            # Unknown embedding: fall back to the default model.
            if embedding not in emb_list:
                comment = f'该embedding:{embedding}不存在, 改为{default_emb}.'
                embedding = default_emb
            base_name_dict = [item for item in base_name_dict if item['embedding__name'] == embedding]
            project_dim_name_list = [project_name+'_'+str(embedding_dim[embedding])]

        # Restrict to the requested bases.
        if base_name is not None:
            # if base_name not in base_list:
            #     comment += f'该base:{base_name}不存在, 搜索该项目下所有知识库.'
            # else:
            base_name_dict = [item for item in base_name_dict if item['name'] in base_name]
        # base_name = [item[0] for item in base_name]
        total_result = []
        for item in base_name_dict:
            tmp_base_name = item['name']

            # Embedding model registered for this base.
            tmp_embedding_type = item['embedding__name']
            embedding_method = embedding_model_dict.get(tmp_embedding_type)
            # Its vector dimension.
            tmp_emb_dim = embedding_dim[tmp_embedding_type]

            vector = embedding_method(question, is_normalize)
            expr = f"uid in {str(detail_id_list)} && embedding=='{tmp_embedding_type}'"
            # Keep collections whose name carries this dimension.
            tmp_project_dim_name_list = [x for x in project_dim_name_list if str(tmp_emb_dim) in x]
            for project_dim_name in tmp_project_dim_name_list:
                if similarity == 'L2':
                    search_params = {
                        "metric_type": "L2",
                        "params": {"nprobe": 10},
                    }

                    res = db[f'L2_{project_dim_name}'].get_search(query_vector=[vector], partition_names=tmp_base_name, topk=topk, expr=expr, is_dict=True,
                                                                  search_params=search_params)

                else:
                    search_params = {
                        "metric_type": "IP",
                        "params": {"nprobe": 10},
                    }
                    res = db[f'IP_{project_dim_name}'].get_search(query_vector=[vector], partition_names=tmp_base_name, topk=topk, expr=expr, is_dict=True,
                                                                  search_params=search_params)

                uid_list = [item['uid'] for item in res[0]]
                uid2score = {item['uid']: item['dis'] for item in res[0]}
                # logger.info('uid_list', uid_list)

                # Hydrate hits from MySQL and attach the vector distance.
                result = Detail.objects.filter(uid__in=uid_list, status=1).values('uid', 'text', 'detail_text', 'source__name',
                                                                                  'embedding__name', 'meta', 'base__name')
                # logger.info(result)
                for i in range(len(result)):
                    uid = result[i]['uid']
                    result[i].update({'distance': uid2score[uid]})
                total_result.extend(result)

        # IP similarity: larger is better; L2 distance: smaller is better.
        if similarity == "IP":
            reverse = True
        else:
            reverse = False
        total_result = sorted(total_result, key= lambda x: x["distance"], reverse=reverse)[:topk]
        end_time = time.time()

        return JsonResponse({
            'answer': list(total_result),
            'comment': comment,
            'time': str(end_time-start_time),
        })
    else:
        comment = ''
        # A specific embedding was requested.
        if embedding is not None:
            # Unknown embedding: fall back to the default model.
            if embedding not in emb_list:
                comment = f'该embedding:{embedding}不存在, 改为{default_emb}.'
                logger.info(comment)
                embedding = default_emb
            project_dim_name_list = [project_name+'_'+str(embedding_dim[embedding])]

            base_name_dict = [item for item in base_name_dict if item['embedding__name'] == embedding]
        # Restrict to the requested bases.
        if base_name is not None:
            base_name_dict = [item for item in base_name_dict if item['name'] in base_name]

        total_result = []
        for item in base_name_dict:
            tmp_base_name = item['name']

            # Embedding model registered for this base.
            tmp_embedding_type = item['embedding__name']

            embedding_method = embedding_model_dict.get(tmp_embedding_type)
            # Its vector dimension.
            tmp_emb_dim = embedding_dim[tmp_embedding_type]
            vector = embedding_method(question, is_normalize)
            expr = f"embedding=='{tmp_embedding_type}'&& base_name=='{tmp_base_name}' "
            # Keep collections whose name carries this dimension.
            tmp_project_dim_name_list = [x for x in project_dim_name_list if str(tmp_emb_dim) in x]
            for project_dim_name in tmp_project_dim_name_list:
                logger.info(tmp_base_name)
                # Fetch topk*3 candidates to leave headroom for the reranker.
                if similarity == 'L2':
                    search_params = {
                        "metric_type": "L2",
                        "params": {"nprobe": 10},
                    }
                    res = db[f'L2_{project_dim_name}'].get_search(query_vector=[vector], partition_names=tmp_base_name,
                                                                  topk=topk*3, expr=expr, is_dict=True,
                                                                  search_params=search_params)

                else:
                    search_params = {
                        "metric_type": "IP",
                        "params": {"nprobe": 10},
                    }
                    res = db[f'IP_{project_dim_name}'].get_search(query_vector=[vector], partition_names=tmp_base_name,
                                                                  topk=topk*3, expr=expr, is_dict=True,
                                                                  search_params=search_params)

                # print('res:', res)
                uid_list = [item['uid'] for item in res[0]]
                uid2score = {item['uid']: item['dis'] for item in res[0]}
                result = Detail.objects.filter(uid__in=uid_list, status=1).values('uid','text', 'detail_text', 'source__name',
                                                                                  'embedding__name', 'meta', 'base__name')
                # print(result)
                for i in range(len(result)):
                    uid = result[i]['uid']
                    result[i].update({'distance': uid2score[uid]})
                total_result.extend(result)

        # Optional rerank step, gated by the YAML config.
        if search_pipline_paras["rerank"]["if_use"]:
            candidate_results = pd.DataFrame(total_result)
            reranker_tool = SingleReranker()
            output_json = reranker_tool.get_results(question=question, candidate_results=candidate_results, topk=topk)
        else:
            output_json = total_result

        # if similarity == "IP":
        #     reverse = True
        # else:
        #     reverse = False
        # total_result = sorted(total_result, key= lambda x: x["distance"], reverse=reverse)[:topk]
        # logger.info(total_result)
        end_time = time.time()
        return JsonResponse({
            'answer': output_json,
            # 'answer': list(total_result),
            'comment': comment,
            'time': str(end_time - start_time)
        })


@csrf_exempt
def delete_data(request):
    """Delete knowledge entries from MySQL and the Milvus collections.

    Supports four deletion modes, checked in order: by base name
    (delete_base), by question uid list (question_uid), by question text
    (question + base), and by source name/id.
    """
    source = request.POST.get('source', None)
    source_id = request.POST.get('source_id', None)

    question = request.POST.get('question', None)
    is_association = request.POST.get('is_association', True)
    # POST values are strings; only the literal "false" disables cascading
    # deletion of similar questions.
    if is_association == "false":
        is_association = False
    # print(is_association)

    question_uid = request.POST.get('question_uid', None)
    if question_uid:
        question_uid_list = json.loads(question_uid)
        question_uid_list = [str(qu) for qu in question_uid_list]
    else:
        question_uid_list = None
    delete_base_name = request.POST.get('delete_base', None)

    project_name = request.POST.get('project', None)
    base_name = request.POST.get('base', None)
    embedding = request.POST.get('embedding', None)

    start_time = time.time()

    if delete_base_name:
        # Drop the whole knowledge base (rows + Milvus partition).
        deleta_base(delete_base_name, project_name=project_name)
    if question_uid_list:
        del_uid_list = []
        if embedding:
            embedding_dim_list = [embedding_dim[embedding]]
        else:
            embedding_dim_list = None
        if is_association:
            for question_uid in question_uid_list:
                # Collect the main question plus its similar questions.
                main_qs = Detail.objects.filter(uid=question_uid)
                if main_qs.exists():
                    base_name = main_qs[0].base.name
                    project_name = main_qs[0].base.project.name
                    # embedding_dim_list = list(set([q.embedding.dims for q in main_qs]))

                    del_uid_list.append(question_uid)
                    sim_qs = Detail.objects.filter(parent_id=question_uid)
                    del_uid_list.extend([s.uid for s in sim_qs])

        else:
            # Delete only the listed questions themselves.
            for question_uid in question_uid_list:
                main_qs = Detail.objects.filter(uid=question_uid)
                if main_qs.exists():
                    base_name = main_qs[0].base.name
                    project_name = main_qs[0].base.project.name
                    # embedding_dim_list = list(set([q.embedding.dims for q in main_qs]))

                    del_uid_list.append(question_uid)

        if del_uid_list:
            # Delete from Milvus first (both metrics, all candidate dims).
            db_name_list = []
            if not embedding_dim_list:
                embedding_dim_list = list(set([e.dims for e in Config.objects.all()]))
            for d in embedding_dim_list:
                db_name_list.append(f"IP_{project_name}_{d}")
                db_name_list.append(f"L2_{project_name}_{d}")
            for db_name in db_name_list:
                try:
                    tmp_db = db[db_name]
                    if tmp_db.collection.has_partition(partition_name=base_name):
                        expr = f"uid in {str(del_uid_list)}"
                        tmp_db.delete_entity(expr=expr, partition_name=base_name)
                        logger.info(f'删除{db_name}库向量分区{base_name}中符合条件的知识')
                except Exception as e:
                    # NOTE(review): "error" has no %s placeholder, so `e`
                    # is not rendered — probably meant
                    # logger.info("error %s", e).
                    logger.info("error", e)
            # Then delete from MySQL.
            Detail.objects.filter(uid__in=del_uid_list, status=1).delete()


    if question and base_name:
        # Delete by question text within one base.
        del_parent_id = None
        del_uid_list = []
        base = Base.objects.filter(name=base_name, project__name=project_name)
        question_query = Detail.objects.filter(text=question, base=base[0])
        if question_query.exists():
            uid = question_query[0].uid
            del_uid_list.append(uid)
            if is_association:
                q_parent_id = question_query[0].parent_id
                if q_parent_id and q_parent_id != "":
                    del_parent_id = q_parent_id
                else:
                    # Deleting a main question: cascade to its children.
                    del_parent_id = None
                    assc_obj = Detail.objects.filter(parent_id=uid)
                    for o in assc_obj:
                        del_uid_list.append(o.uid)
            else:
                del_parent_id = uid

        if del_uid_list:
            db_name_list = [f"IP_{project_name}", f"L2_{project_name}"]
            if embedding:
                dim_list = [embedding_dim[embedding]]
            # NOTE(review): this unconditionally overwrites the assignment
            # above — the requested embedding's dim is never honored here.
            dim_list = list(set(embedding_dim.values()))

            for n in db_name_list:
                for dim in dim_list:
                    db_name = f"{n}_{dim}"
                    try:
                        tmp_db = db[db_name]
                        if tmp_db.collection.has_partition(partition_name=base_name):
                            expr = f"uid in {str(del_uid_list)}"
                            tmp_db.delete_entity(expr=expr, partition_name=base_name)
                            logger.info(f'删除{db_name}库向量分区{base_name}中符合条件的知识')
                    except Exception as e:
                        logger.info("error", e)

            Detail.objects.filter(uid__in=del_uid_list, base=base[0], status=1).delete()
            end_time = time.time()

        # NOTE(review): if del_uid_list is empty, end_time is unbound here
        # and this return raises UnboundLocalError.
        return JsonResponse({
            'del_parent_id': del_parent_id,
            'time': str(end_time-start_time),
        })


    if source or source_id:
        # Delete everything imported from a given source (by id or name).
        if source_id:
            base_name_dict = Detail.objects.filter(source__uid=source_id).values('uid', 'base__name').distinct()
            if not base_name_dict:
                return JsonResponse({
                    "message": "无解析数据！"
                })
            detail_id_list = [x['uid'] for x in base_name_dict]
            base_name_set = set([x['base__name'] for x in base_name_dict])
            project_name = Base.objects.get(name=list(base_name_set)[0], status=1).project.name
            res = Detail.objects.filter(source__uid=source_id, status=1).delete()

        if source:
            base_name_dict = Detail.objects.filter(source__name=source,  base__project__name=project_name)\
                        .values('uid', 'base__name').distinct()
            detail_id_list = [x['uid'] for x in base_name_dict]
            base_name_set = set([x['base__name'] for x in base_name_dict])

            res = Detail.objects.filter(source__name=source, status=1, base__project__name=project_name).delete()
        # Remove the vectors from every collection of this project.
        for db_name, tmp_db in db.items():
            # Key format "<metric>_<project...>_<dim>"; recover the project.
            true_db_name = '_'.join(db_name.split('_')[1:-1])

            if project_name == true_db_name:
                for tmp_base_name in base_name_set:
                    try:
                        if tmp_db.collection.has_partition(partition_name=tmp_base_name):
                            expr = f"uid in {str(detail_id_list)}"
                            # tmp_db.load_collection()
                            # Release before deleting, then reload.
                            tmp_db.collection.release()
                            # tmp_db.release_partition(partition_name=base_name)
                            tmp_db.delete_entity(expr=expr, partition_name=tmp_base_name)
                            tmp_db.collection.load()
                            # tmp_db.load_partition(partition_name=base_name)
                            logger.info(f'删除{db_name}库向量分区{tmp_base_name}中来源为{source}的知识')
                    except Exception as e:
                        logger.info("error", e)

    end_time = time.time()

    return JsonResponse({
        # 'answer': list(total_result),
        # 'comment': comment,
        'time': str(end_time-start_time),
    })


def deleta_base(base_name, project_name):
    """Soft-delete a knowledge base and purge its vector partition.

    Hard-deletes the base's Detail rows, marks the Base row inactive
    (status=0), then drops the matching Milvus partition in every vector
    collection that belongs to the project.

    Args:
        base_name: Name of the knowledge base to delete.
        project_name: Name of the project that owns the base.

    Note: the misspelled function name ("deleta") is kept — callers
    elsewhere reference it.
    """
    # Remove the base's knowledge rows and deactivate the base itself.
    Detail.objects.filter(base__name=base_name, base__project__name=project_name).delete()
    Base.objects.filter(name=base_name, project__name=project_name).update(status=0)

    # db keys look like '<metric>_<project...>_<dim>'; strip the metric
    # prefix and the dimension suffix to recover the project name.
    for db_name, tmp_db in db.items():
        true_db_name = '_'.join(db_name.split('_')[1:-1])
        if project_name != true_db_name:
            continue
        try:
            if tmp_db.collection.has_partition(partition_name=base_name):
                # A partition can only be dropped while the collection is released.
                tmp_db.collection.release()
                tmp_db.drop_partition(partition_name=base_name)
                tmp_db.collection.load()
                logger.info(f'删除{db_name}库向量分区{base_name}')
        except Exception as e:
            # Best-effort cleanup: log and continue with the remaining
            # collections instead of silently swallowing the error.
            logger.info('drop partition failed for %s/%s: %s', db_name, base_name, e)


@csrf_exempt
def insert_data_from_mysql(request):
    """Rebuild a project's Milvus vector data from the MySQL Detail table.

    Re-embeds every active Detail row of the project (grouped by base and
    embedding model), drops the stale partition in both the IP and L2
    collections, then re-inserts the fresh vectors in fixed-size batches.

    POST params:
        project: name of the project to restore.

    Returns:
        JsonResponse with either a 'result' message or an 'error', plus
        elapsed 'time'.
    """
    start_time = time.time()
    project_name = request.POST.get('project', None)
    # Guard: the original crashed with `TypeError: argument of type
    # 'NoneType' is not iterable` when the 'project' param was missing.
    if not project_name:
        return JsonResponse({
            'error': '参数project不能为空',
            'time': str(time.time() - start_time),
        })
    # "legal" projects keep raw embedding vectors; all others are normalized.
    is_normalize = "legal" not in project_name

    batch_size = 2000
    detail_list = Detail.objects.filter(base__project__name=project_name, status=1)\
        .values('uid', 'text', 'source', 'meta', 'embedding__name', 'base__project__name', 'base__name')

    # 如果不存在该项目或者项目下无数据
    if not detail_list:
        end_time = time.time()
        return JsonResponse({
            'error': f'该{project_name}项目下无数据',
            'time': str(end_time - start_time),
        })

    # 项目下有数据, 则milvus对应collection存在
    data = pd.DataFrame({
        'text': [item['text'] for item in detail_list],
        'uid': [item['uid'] for item in detail_list],
        'embedding': [item['embedding__name'] for item in detail_list],
        'meta_data': [item['meta'] for item in detail_list],
        'source': [item['source'] for item in detail_list],
        'base_name': [item['base__name'] for item in detail_list],
        'project_name': [item['base__project__name'] for item in detail_list],
    })
    logger.info(len(data))

    for (tmp_base_name, tmp_embedding), tmp_df in data.groupby(['base_name', 'embedding']):
        # BUG FIX: logger.info(a, b) treats b as a %-format argument and
        # errors at log time; use an explicit format string instead.
        logger.info('%s %s', tmp_base_name, tmp_embedding)
        # choose_model falls back to the default encoder instead of
        # returning None for an unknown embedding name.
        embedding_method = choose_model(tmp_embedding)

        dim = embedding_dim[tmp_embedding]
        # 向量collection
        vector_collection_name = project_name + '_' + str(dim)
        logger.info(vector_collection_name)

        insert_uid = tmp_df.uid.to_list()
        # Truncate each text to 1000 chars before embedding.
        text_v = [x[:1000] for x in tmp_df.text.to_list()]

        # Embed one text at a time, then convert to float32 lists for Milvus.
        vector = np.array([embedding_method(t, is_normalize) for t in text_v])
        feature_v = [list(x.astype('float32')) for x in vector]

        source_v = tmp_df.source.astype(str).to_list()
        meta_v = tmp_df.meta_data.astype(str).to_list()
        embedding_v = tmp_df.embedding.to_list()
        project_v = tmp_df.project_name.to_list()
        base_v = tmp_df.base_name.to_list()
        time_v = [datetime.now().strftime('%Y-%m-%d %H-%M-%S')] * len(tmp_df)

        # Column order must match the Milvus collection schema.
        tmp_result_list = [insert_uid, text_v, source_v, meta_v,
                           feature_v, embedding_v, project_v, base_v, time_v]

        total_count = len(tmp_result_list[0])
        tmp_db_name_list = ['IP_' + vector_collection_name, 'L2_' + vector_collection_name]
        # 先删除原始partition
        for tmp_db_name in tmp_db_name_list:
            tmp_db = db[tmp_db_name]
            if tmp_db.collection.has_partition(partition_name=tmp_base_name):
                # Partitions can only be dropped while the collection is released.
                tmp_db.collection.release()
                tmp_db.drop_partition(partition_name=tmp_base_name)
                tmp_db.collection.load()
                logger.info(f'删除{tmp_db_name}库向量分区{tmp_base_name}')

        # Insert in fixed-size batches into both metric collections.
        for i in range(0, total_count, batch_size):
            end = min(i + batch_size, total_count)
            insert_list = [x[i:end] for x in tmp_result_list]
            # 写入向量数据库
            for db_name in tmp_db_name_list:
                tmp_db = db[db_name]
                tmp_db.insert_data(data=insert_list, partition_name=tmp_base_name)
                logger.info(f'index: {db_name} 插入 {len(insert_list[0])} 条至 {tmp_base_name}')

    end_time = time.time()
    return JsonResponse({
        'result': f'已恢复向量数据库',
        'time': str(end_time - start_time),
    })

@csrf_exempt
def test_data(request):
    """Debug endpoint: parse the 'text' POST field and return a canned reply."""
    raw = request.POST.get('text', None)
    print(raw, type(raw))
    parsed = json.loads(raw)
    print(parsed, type(parsed))
    return JsonResponse({
        'answer': '你好',
        'time': 1,
    })


@csrf_exempt
def get_file_content(request):
    """Return all knowledge entries belonging to one source file.

    Looks up the Source by (uid, name). When found, every Detail of that
    source within the given project/base is returned with 'insert': 0;
    otherwise an empty list is returned with 'insert': 1, signalling the
    caller that the file has not been ingested yet.
    """
    project_name = request.POST.get('project_name', None)
    base_name = request.POST.get('base_name', None)

    source = request.POST.get('source', None)
    source_id = request.POST.get('source_id', None)

    matching_sources = Source.objects.filter(uid=source_id, name=source)
    if not matching_sources.exists():
        return JsonResponse({
            'status': 200,
            'data': [],
            'insert': 1
        })

    base = Base.objects.get(
        name=base_name,
        project=Project.objects.get(name=project_name),
    )
    contents = [
        {"contentId": row.uid, "content": row.text, "extDocContent": row.detail_text}
        for row in Detail.objects.filter(source=matching_sources[0], base=base)
    ]
    return JsonResponse({
        'status': 200,
        'data': contents,
        'insert': 0
    })


@csrf_exempt
def compare_sim_question(request):
    """Diff an FAQ entry against its stored version.

    Determines whether the main question text changed and which similar
    (extended) questions were added or removed. When only the answer
    changed, the answer is updated in place for the question and all of
    its extended variants.

    POST params:
        text / text_uid / detail_text: main question, its uid, the answer.
        sim_text: JSON list of {"extend_question": ...} items (optional).

    Returns:
        200 with the diff, 200 with insert=True when the uid is unknown,
        209 on unexpected errors, 401 when text_uid is missing.
    """
    text = request.POST.get('text', None)
    text_uid = request.POST.get('text_uid', None)
    detail_text = request.POST.get('detail_text', None)
    project_name = request.POST.get('project', None)
    base_name = request.POST.get('base', None)
    sim_text = request.POST.get('sim_text', None)
    # BUG FIX: default sim_text to [] so the later iteration cannot raise
    # `TypeError: 'NoneType' object is not iterable` when the param is absent.
    sim_text = json.loads(sim_text) if sim_text else []
    new_sim_text = [str(s["extend_question"]) for s in sim_text]

    if not text_uid:
        return JsonResponse({
            "status": 401,
            "message": "参数text_uid不能为空！"
        })

    try:
        main_question = Detail.objects.get(uid=text_uid)
        # 判断主问题question是否改变了
        update_question = "false" if main_question.text == text else "true"

        # 判断相似问题是否需要增加或删除 (an empty queryset naturally
        # yields empty lists, no exists() check needed)
        sim_qs = Detail.objects.filter(parent_id=text_uid)
        old_sim_text = [s.text for s in sim_qs]
        old_sim_text_id = [{"extend_id": s.uid, "extend_question": s.text} for s in sim_qs]

        add_sim_text = set(new_sim_text) - set(old_sim_text)
        delete_sim_text = set(old_sim_text) - set(new_sim_text)

        add_sim_item = [item for item in sim_text
                        if item["extend_question"] in add_sim_text]
        delete_sim_item = [item for item in old_sim_text_id
                           if item["extend_question"] in delete_sim_text]

        # 如果只改答案了，直接在这个函数中修改
        if update_question == "false" and not add_sim_item and not delete_sim_item:
            if main_question.detail_text != detail_text:
                Detail.objects.filter(Q(uid=text_uid) | Q(parent_id=text_uid)).update(detail_text=detail_text)
    except Exception as e:
        if not Detail.objects.filter(uid=text_uid).exists():
            # Unknown uid: treat the whole entry as a fresh insert.
            return JsonResponse({
                "status": 200,
                "data": {
                    "update_question": "false",
                    "add_sim_item": sim_text,
                    "delete_sim_item": []
                },
                "insert": True
            })
        # BUG FIX: an exception object is not JSON-serializable and made
        # JsonResponse fail with a 500; return its string form instead.
        return JsonResponse({
            'status': 209,
            'message': str(e)
        })

    return JsonResponse({
        "status": 200,
        "data": {
            "update_question": update_question,
            "add_sim_item": add_sim_item,
            "delete_sim_item": delete_sim_item
        }
    })


@csrf_exempt
def build_project(request):
    """Create a project, or revive/update it when it already exists."""
    name = request.POST.get("project_name", None)  # 项目名称
    desc = request.POST.get("project_desc", None)  # 项目描述
    existing = Project.objects.filter(name=name)
    if existing.exists():
        # Re-activate the project and refresh its description.
        existing.update(comment=desc, status=1)
    else:
        Project(name=name, comment=desc).save()
    # Both branches report the same success payload.
    return JsonResponse({
        "status": 200,
        "message": "项目创建成功！"
    })
    
    
@csrf_exempt
def delete_project(request):
    """Soft-delete a project by flipping its status flag to 0."""
    name = request.POST.get("project_name", None)  # 项目名称
    target = Project.objects.filter(name=name)
    if not target.exists():
        return JsonResponse({
            "status": 209,
            "message": "项目不存在！"
        })
    target.update(status=0)
    return JsonResponse({
        "status": 200,
        "message": "项目删除成功！"
    })
    
    
@csrf_exempt
def update_group_name(request):
    """Rename a knowledge-base group within one project."""
    project_name = request.POST.get('project_name', None)
    old_name = request.POST.get('old_group_name', None)
    new_name = request.POST.get('new_group_name', None)
    # Apply the new group name to every base carrying the old one.
    Base.objects.filter(
        project__name=project_name, group_name=old_name
    ).update(group_name=new_name)
    return JsonResponse({
        "status": 200,
        "message": "分组名称更新成功！"
    })
    


@csrf_exempt
def build_base(request):
    """Create a knowledge base under a project, or revive/update it.

    Validates the embedding config and the project first; when the base
    already exists, its description and group are refreshed and it is
    re-activated instead of duplicated.
    """
    project_name = request.POST.get("project_name", None)  # 项目名称
    group_name = request.POST.get("group_name", None)  # 分组名称
    base_name = request.POST.get("base_name", None)  # 知识库名称
    base_desc = request.POST.get("base_desc", None)  # 知识库描述
    embedding_type = request.POST.get("embedding_type", "m3e-base")
    try:
        embedding = Config.objects.get(name=embedding_type)
    except Config.DoesNotExist:
        # Narrowed from a blanket `except Exception`: only "not found"
        # should map to this client error; real DB failures now surface.
        return JsonResponse({
            "status": 209,
            "message": f"embedding {embedding_type} 不存在！"
        })

    # 判断项目是否存在
    project_qs = Project.objects.filter(name=project_name, status=1)
    if not project_qs.exists():
        return JsonResponse({
            "status": 209,
            "message": "项目不存在！"
        })

    project = project_qs[0]
    # 判断知识库是否存在
    base_qs = Base.objects.filter(project=project, name=base_name)
    if base_qs.exists():
        # Existing base: refresh description/group and re-activate.
        base_qs.update(category=base_desc, group_name=group_name, status=1)
    else:
        Base(
            project=project,
            group_name=group_name,
            name=base_name,
            category=base_desc,
            embedding=embedding
        ).save()
    return JsonResponse({
        "status": 200,
        "message": "知识库创建成功！"
    })


@csrf_exempt
def get_mapping_project_base(request):
    """Map Chinese display names to internal project/base names.

    The project is looked up by its Chinese comment; the base by its
    Chinese category plus a name suffix (either the literal suffix or its
    numeric alias: 'faq' -> '1', anything else -> '2').
    """
    project_name_zh = request.POST.get('project_name_zh', None)
    project_qs = Project.objects.filter(comment=project_name_zh)
    if not project_qs.exists():
        return JsonResponse({
            'status': 209,
            'message': "project不存在",
            'project_name': '',
            'base_name': ''
        })
    project = project_qs[0]
    project_name = project.name

    base_name_zh = request.POST.get('base_name_zh', None)
    suffix = request.POST.get('suffix', None)
    suffix_id = "1" if suffix == 'faq' else "2"
    base_qs = Base.objects.filter(
        Q(project=project),
        Q(category=base_name_zh),
        Q(name__endswith=suffix) | Q(name__endswith=suffix_id),
    )
    if not base_qs.exists():
        return JsonResponse({
            'status': 209,
            'message': "base不存在",
            'project_name': project_name,
            'base_name': ''
        })
    return JsonResponse({
        'status': 200,
        'base_name': base_qs[0].name,
        'project_name': project_name
    })