import os
import pandas as pd
import numpy as np
import math
import sys
import time
import shutil
import torch
from glob import glob
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, AutoModelForCausalLM, BertTokenizer, AutoModelForSequenceClassification

from utlis import path_handle, clean_file, post_request, check_internet, know_df_cols, all_df_cols, match_df_cols
from kb_encoder import encode_user_kb
from user_learner import CBD_Memory, SequencePredictor, MarkerPredictor
from encoder_finetuner import EncoderFinetuner, gen_train_data_from_queries, gen_queries, load_gen_queries, gen_train_data_from_contents, gen_corpus
from loguru import logger


def check_create_user(kb_data_folder, kb_vecs_folder, setting_csv="Meta setting.csv", subfolders=("Supplementary Files", "templates", "默认目录")):
    """Ensure the per-user KB data/vector folders exist, creating them on first use.

    On first creation the standard subfolders are added under the data folder
    and, if ``setting_csv`` is given, the matching settings CSV is copied in
    from the parent directory of ``kb_data_folder``.

    :param kb_data_folder: root folder holding the user's raw KB files
    :param kb_vecs_folder: folder holding derived vectors/caches for the user
    :param setting_csv: glob pattern (relative to the parent dir) of the
        settings CSV to copy in, or ``None`` to skip copying
    :param subfolders: subdirectory names created under ``kb_data_folder``
        (tuple default — the original list default was shared mutable state)
    """
    first_time = not os.path.exists(kb_data_folder)
    # exist_ok also repairs the case where the data folder exists but the
    # vectors folder was deleted (the original code then skipped it entirely).
    os.makedirs(kb_data_folder, exist_ok=True)
    os.makedirs(kb_vecs_folder, exist_ok=True)

    if first_time:
        print(f"✅ 创建主目录: {kb_data_folder} 以及 {kb_vecs_folder}")

        for sub in subfolders:
            sub_path = os.path.join(kb_data_folder, sub)
            if not os.path.exists(sub_path):
                os.makedirs(sub_path)
                print(f"✅ 创建子目录: {sub_path}")
            else:
                print(f"📂 子目录已存在: {sub_path}")

        if setting_csv is not None:
            parent_dir = os.path.abspath(os.path.join(kb_data_folder, ".."))
            csv_files = glob(os.path.join(parent_dir, setting_csv))
            # Was a bare `except` swallowing the IndexError from csv_files[0];
            # test for the empty match explicitly instead.
            if csv_files:
                dest_path = os.path.join(kb_data_folder, os.path.basename(csv_files[0]))
                shutil.copy(csv_files[0], dest_path)
                print(f"✅ 复制 CSV 文件到: {dest_path}")
            else:
                print("⚠️ 未在上级目录找到 .csv 文件")
    else:
        print(f"📂 主目录已存在: {kb_data_folder}")
    

def clean_kb(USER_SETTINGS, dirs_to_keep=('images', 'tables', 'templates', 'Supplementary Files')):
    '''
        :function delete all KB relevant contents, use with caution

        Directories whose *basename* is in ``dirs_to_keep`` are preserved (at
        any depth); every other directory under KB_PATH is removed.  Files
        directly inside the images/tables folders and the temp-results folder
        are also deleted, while those folders themselves are kept.

        :param USER_SETTINGS: settings dict providing 'KB_PATH' and 'TEMP_RES_PATH'
        :param dirs_to_keep: directory basenames that survive the purge
            (tuple default — the original list default was shared mutable state)
    '''
    print('\tclean current KB, will delete all files and directories...')
    # bottom-up walk: children are visited (and possibly removed) before parents
    for root, dirs, files in os.walk(USER_SETTINGS['KB_PATH'], topdown=False):
        for dir_name in dirs:
            dir_path = os.path.join(root, dir_name)
            if dir_name not in dirs_to_keep:
                shutil.rmtree(dir_path)
                print(f"Deleted: {dir_path}")
            else:
                print(f"Kept: {dir_path}")

    specific_dirs2clean = [os.path.join(USER_SETTINGS['KB_PATH'], 'images'),
                           os.path.join(USER_SETTINGS['KB_PATH'], 'tables'),
                           USER_SETTINGS['TEMP_RES_PATH']]
    for dir2clean in specific_dirs2clean:
        # Was an unguarded os.listdir → FileNotFoundError when a folder
        # (e.g. 'tables') has not been created yet.
        if not os.path.isdir(dir2clean):
            continue
        for item in os.listdir(dir2clean):
            item_path = os.path.join(dir2clean, item)
            if os.path.isfile(item_path):
                os.remove(item_path)
                print(f"Deleted file: {item_path}")
    

def customized_meta_settings(KB_PATH, split_char=';'):
    """Load 'Meta setting.csv' from the KB root into a typed settings dict.

    Each row maps ``variable`` -> ``value``.  String values containing
    ``split_char`` become lists, TRUE/FALSE become bools, *COLOR* keys become
    int tuples, and a fixed set of known keys is cast to int/float.  Finally
    the LLM mode is downgraded based on GPU / internet availability.

    :param KB_PATH: user KB root containing 'Meta setting.csv'
    :param split_char: separator for multi-valued settings
    :returns: dict of parsed settings
    """
    meta_path = os.path.join(KB_PATH, 'Meta setting.csv')
    try:
        meta_df = pd.read_csv(meta_path, encoding='utf-8')
    except UnicodeDecodeError:
        # Was a bare `except`; only encoding failures should trigger the
        # GBK fallback — anything else (missing file, bad CSV) must surface.
        meta_df = pd.read_csv(meta_path, encoding='gbk')

    meta_dic = {}
    for _, row in meta_df.iterrows():
        key = row['variable']
        val = row['value']
        if isinstance(val, float) and math.isnan(val):
            continue  # empty cell -> leave the key absent

        # Guard the string checks: a numeric cell would make `in` raise
        # TypeError in the original code.
        if isinstance(val, str):
            # BUG FIX: the literal ';' was hard-coded here, silently ignoring
            # the split_char parameter.
            if split_char in val:
                val = val.split(split_char)
            elif 'TRUE' in val:
                val = True
            elif 'FALSE' in val:
                val = False
            elif 'COLOR' in key:
                # NOTE(review): this iterates the *characters* of the string,
                # producing one int per digit — confirm the expected CSV
                # format for COLOR entries (looks suspicious).
                val = tuple(int(v) for v in val)
        meta_dic[key] = val

    meta_dic['ROOT_LEN'] = len(path_handle(KB_PATH, 'split'))
    meta_dic['TOP_K'] = int(meta_dic['TOP_K'])
    meta_dic['N_TRIGGER'] = int(meta_dic['N_TRIGGER'])
    meta_dic['BATCH_SIZE'] = int(meta_dic['BATCH_SIZE'])
    meta_dic['N_EPOCHS'] = int(meta_dic['N_EPOCHS'])

    meta_dic['CLLM_THRESHOLD'] = int(meta_dic['CLLM_THRESHOLD'])
    meta_dic['REWRITE_THRESHOLD'] = int(meta_dic['REWRITE_THRESHOLD'])
    meta_dic['SIZE'] = int(meta_dic['SIZE'])
    meta_dic['TABLE_SIZE'] = int(meta_dic['TABLE_SIZE'])

    meta_dic['THRESHOLD'] = float(meta_dic['THRESHOLD'])
    meta_dic['SUMMARY_THRESHOLD'] = float(meta_dic['SUMMARY_THRESHOLD'])
    meta_dic['L_RATE'] = float(meta_dic['L_RATE'])
    meta_dic['OCR_TIMEOUT'] = float(meta_dic['OCR_TIMEOUT'])

    # check GPU
    device = "cuda" if torch.cuda.is_available() else "cpu"
    logger.info('device:{}', device)

    # check internet: without connectivity, force the local LLM
    if_connet = check_internet()
    if not if_connet:
        meta_dic['USE_LOCAL_LLM'] = True

    # NOTE(review): on CPU this *disables* the local LLM even when there is
    # no internet (overriding the line above) — confirm this precedence.
    if device == 'cpu':
        meta_dic['USE_LOCAL_LLM'] = False
        # meta_dic['LOCAL_SUMMARY'] = False
    return meta_dic


def read_user_model_config(USER_SETTINGS):
    """Load LLM/encoder configuration and build the model access layer.

    Reads the tab-separated config file, registers the remote LLM APIs, and —
    when USE_LOCAL_LLM is set — loads the local LLM, encoder and reranker onto
    the available device.  Defines two closures that transparently switch
    between local models and the remote embedding/rerank HTTP service.

    :param USER_SETTINGS: settings dict (CONFIG_PATH, USE_LOCAL_LLM,
        LOCAL_MODELS_DIR, LOCAL_LLM_NAME, LOCAL_RERANKER, LOCAL_ENCODER)
    :returns: (config, llm_apis, model, tokenizer, local_llm, local_llm_tz,
               vectorize_texts, rerank_m3)
    """
    config = {}
    with open(USER_SETTINGS['CONFIG_PATH'], 'r') as file:
        for current_line in file.readlines():
            key, value = current_line.split('\t')
            config[key.strip()] = value.strip()
    config['HISTORY_K'] = int(config['HISTORY_K'])

    # Resolve the device once; the original only defined it inside the first
    # USE_LOCAL_LLM branch, which the closures below silently depended on.
    device = "cuda" if torch.cuda.is_available() else "cpu"

    '''
        :LLMs
    '''
    llm_apis = {}
    sys.path.append(os.path.abspath('../LLM_APIs/DS_API'))
    from call_ds import call_ds
    llm_apis.update({'ds_api' : call_ds})

    sys.path.append(os.path.abspath('../LLM_APIs/Qwen_API'))
    from call_qwen import call_qwen, call_qwen_stream
    llm_apis.update({'qwen_api' : call_qwen, 'qwen_stream_api':call_qwen_stream})

    sys.path.append(os.path.abspath('../LLM_APIs/GPT_API'))
    from call_GPT import call_gpt, call_gpt_stream
    llm_apis.update({'gpt_api' : call_gpt, 'gpt_stream_api':call_gpt_stream})

    if USER_SETTINGS['USE_LOCAL_LLM']:
        sys.path.append(os.path.abspath('../LLM_APIs/Local_API'))
        from call_locals import call_local_llm, call_local_stream

        local_llm_path = os.path.join(USER_SETTINGS['LOCAL_MODELS_DIR'], USER_SETTINGS['LOCAL_LLM_NAME'])
        local_reranker_path = os.path.join(USER_SETTINGS['LOCAL_MODELS_DIR'], USER_SETTINGS['LOCAL_RERANKER'])
        local_llm_tz = AutoTokenizer.from_pretrained(local_llm_path, trust_remote_code=True)
        local_llm = AutoModelForCausalLM.from_pretrained(
                            local_llm_path,
                            torch_dtype="auto",
                            low_cpu_mem_usage=True,
                            trust_remote_code=True,
                        ).to(device).eval()
        # BUG FIX: the original computed local_reranker_path but never loaded
        # the reranker, so rerank_m3 raised NameError on local inference.
        local_rerank_tz = AutoTokenizer.from_pretrained(local_reranker_path, trust_remote_code=True)
        local_rerank = AutoModelForSequenceClassification.from_pretrained(
                            local_reranker_path,
                            trust_remote_code=True,
                        ).to(device).eval()
    else:
        call_local_llm = None
        call_local_stream = None
        local_llm_tz = None
        local_llm = None
        local_rerank_tz = None
        local_rerank = None
    llm_apis.update({'local_api' : call_local_llm, 'local_stream_api': call_local_stream})

    '''
        :encoders
    '''
    if USER_SETTINGS['USE_LOCAL_LLM']:
        encoder_path = os.path.join(USER_SETTINGS['LOCAL_MODELS_DIR'], USER_SETTINGS['LOCAL_ENCODER'])
        tokenizer = AutoTokenizer.from_pretrained(encoder_path, trust_remote_code=True)
        # BUG FIX: SentenceTransformer's second positional argument is
        # `modules`, not `model_kwargs` — the original passed {'device': ...}
        # there; the device keyword alone is what was intended.
        model = SentenceTransformer(encoder_path, device=device)
    else:
        tokenizer = None
        model = None

    def _gc():
        # Free Python and CUDA memory between heavy encode/rerank calls.
        import gc
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

    def vectorize_texts(texts, tokenizer=None, model=None, use_tensor=False, sentence=False):
        """Embed `texts` locally (SentenceTransformer) or via the HTTP service.

        :returns: (texts, embeddings) — embeddings is a tensor/ndarray locally,
            a float32 ndarray from the remote service
        """
        if USER_SETTINGS['USE_LOCAL_LLM']:
            _gc()
            with torch.no_grad():
                embeddings = model.encode(texts, convert_to_tensor=use_tensor)
            return texts, embeddings

        req_body = {'query':texts}
        msg, status_code = post_request('http://218.17.187.47:35010/toembedding', req_body)
        if status_code != 200:
            raise ConnectionError(msg)
        embedding = np.array(msg['embedding'], dtype=np.float32)
        return texts, embedding

    def rerank_m3(msg, paths, details):
        """Rerank (paths, details) against query `msg`; returns all three lists
        sorted by descending relevance score."""
        rr_pairs = [[msg, detail] for detail in details]

        if USER_SETTINGS['USE_LOCAL_LLM']:
            _gc()
            with torch.no_grad():
                inputs = local_rerank_tz(rr_pairs, padding=True, truncation=True, return_tensors='pt', max_length=512)
                inputs = {key: value.to(device) for key, value in inputs.items()}  # move inputs to the model device
                scores = local_rerank(**inputs, return_dict=True).logits.view(-1, ).float().cpu().tolist()
        else:
            req_body = {'rr_pairs':rr_pairs}
            msg, status_code = post_request('http://218.17.187.47:35010/rerank', req_body)
            if status_code != 200:
                raise ConnectionError(msg)
            scores = msg['scores']

        sorted_combined = sorted(list(zip(paths, details, scores)), key=lambda x: x[2], reverse=True)
        paths, details, scores = zip(*sorted_combined)
        return list(paths), list(details), list(scores)
    return config, llm_apis, model, tokenizer, local_llm, local_llm_tz, vectorize_texts, rerank_m3

# create the knowledge base and full per-user runtime context
def settings_(user='', parent=None, kb_term='KB_DATA_', kb_vec_term='KB_VECS_'):
    """Assemble the complete runtime context for one user.

    Creates the user's KB folders if needed, loads the Meta settings, wires
    every cache/output path under the temp-results folder, loads model config
    and local models, and initializes the optimization memory plus (when
    BN_RL is enabled) the finetuner/reasoners.

    :param user: user id appended to the KB folder prefixes
    :param parent: parent dir of the KB folders; ``None`` means '..'
    :param kb_term: folder-name prefix for raw KB data
    :param kb_vec_term: folder-name prefix for derived vectors/caches
    :returns: a large tuple of settings, models and helpers (see return line)
    """
    base = '..' if parent is None else parent  # was `parent==None`
    KB = base + os.sep + kb_term
    KB_PATH = base + os.sep + kb_term + user
    TEMP_RES_PATH = base + os.sep + kb_vec_term + user

    check_create_user(KB_PATH, TEMP_RES_PATH)
    USER_SETTINGS = customized_meta_settings(KB_PATH)

    USER_SETTINGS['USER_ID'] = user
    USER_SETTINGS['KB_PATH'] = KB_PATH
    USER_SETTINGS['TEMP_RES_PATH'] = TEMP_RES_PATH
    USER_SETTINGS['CONFIG_PATH'] = '../LLM_APIs/config.txt'
    USER_SETTINGS['SUPP_FILE_PATH'] = os.path.join(KB_PATH, 'Supplementary Files')

    # download / export caches
    USER_SETTINGS['OUT_PATH'] = os.path.join(TEMP_RES_PATH, 'Res_text.txt') # plain-text download cache
    USER_SETTINGS['DOC_PATH'] = os.path.join(TEMP_RES_PATH, 'Res_doc.docx') # doc download cache
    USER_SETTINGS['REPORT_PATH'] = os.path.join(TEMP_RES_PATH, 'Report_doc.docx') # report doc download cache
    USER_SETTINGS['TB_PATH'] = os.path.join(TEMP_RES_PATH, 'Res_tb.xlsx') # table download cache
    USER_SETTINGS['MATCH_DF'] = os.path.join(TEMP_RES_PATH, 'temp_match_df.csv')
    # NOTE(review): several names below contain typos ('TITILE', 'fintune',
    # 'Pasing') — they are load-bearing keys/filenames shared with other
    # modules; do not "fix" them without migrating every consumer.
    USER_SETTINGS['TEMP_TITILE_PATH'] = os.path.join(TEMP_RES_PATH, 'temp_titles.json')

    # KB vector / corpus artifacts
    USER_SETTINGS['KB_VEC_PATH'] = os.path.join(TEMP_RES_PATH, 'all_vec.npy')
    USER_SETTINGS['KB_PATH_VEC_PATH'] = os.path.join(TEMP_RES_PATH, 'all_path_vec.npy')
    USER_SETTINGS['KB_PATH_JSON'] = os.path.join(TEMP_RES_PATH, 'KB_path_dic.json')
    USER_SETTINGS['CORPUS'] = os.path.join(TEMP_RES_PATH, 'general_corpus.pkl')
    USER_SETTINGS['CORPUS_META'] = os.path.join(TEMP_RES_PATH, 'general_corpus_meta.pkl')
    USER_SETTINGS['KB_CONTENT_PATH'] = os.path.join(TEMP_RES_PATH, 'all_contents.csv')
    USER_SETTINGS['PARSED_FILE_PATH'] = os.path.join(TEMP_RES_PATH, 'Parsed_files.txt')
    USER_SETTINGS['PARSING_ISSUES'] = os.path.join(TEMP_RES_PATH, 'Pasing_issues.txt')

    # LLM parsing caches and templates
    USER_SETTINGS['DOC_INFO_PATH'] = os.path.join(TEMP_RES_PATH, 'api_info_doc.json') # doc-template cache
    USER_SETTINGS['INFO_PATH'] = os.path.join(TEMP_RES_PATH, 'api_info.json') # LLM outline-parsing cache
    USER_SETTINGS['TEMP_INJECT_PATH'] = os.path.join(TEMP_RES_PATH, 'inject_know.json')
    USER_SETTINGS['TEMPLATE_DIR'] = os.path.join(KB_PATH, 'templates')

    # finetuning data artifacts
    USER_SETTINGS['TRAIN_DATA_GEN_QUERIES'] = os.path.join(TEMP_RES_PATH, 'fintune_data_gen_queries.jsonl')
    USER_SETTINGS['LOCAL_LEARN_DIR'] = os.path.join(TEMP_RES_PATH, 'userlearn_chk')
    USER_SETTINGS['TRAIN_DATA_PATH'] = os.path.join(TEMP_RES_PATH, 'fintune_data_path.jsonl') # supervised finetune with path
    USER_SETTINGS['TRAIN_DATA_CONTENT'] = os.path.join(TEMP_RES_PATH, 'fintune_data_content.jsonl') # supervised finetune with content
    USER_SETTINGS['TRAIN_DATA_BOTH'] = os.path.join(TEMP_RES_PATH, 'fintune_data_both.jsonl') # supervised finetune with both path and content
    USER_SETTINGS['TRAIN_DATA_ALL_CONTENTS'] = os.path.join(TEMP_RES_PATH, 'fintune_data_all_contents.jsonl') # un-supervised finetune with raw contents

    model_config, llm_apis, model, tokenizer, local_llm, local_llm_tz, vectorize_texts, rerank_m3 = read_user_model_config(USER_SETTINGS)
    USER_SETTINGS['RERANKER_PATH'] = os.path.join(USER_SETTINGS['LOCAL_MODELS_DIR'], USER_SETTINGS['LOCAL_RERANKER'])
    opt_memory = CBD_Memory(USER_SETTINGS['TOP_K'],
                            int(USER_SETTINGS['EMBEDDING_LEN']),
                            model,
                            tokenizer,
                            vectorize_texts,
                            USER_SETTINGS['N_TRIGGER'],
                            USER_SETTINGS['L_RATE'],
                            USER_SETTINGS['BATCH_SIZE'],
                            USER_SETTINGS['N_EPOCHS'],
                            USER_SETTINGS['LOCAL_LEARN_DIR'])

    # local models get a tighter output budget
    if USER_SETTINGS['USE_LOCAL_LLM']:
        USER_SETTINGS['LLM_OUT_LIMIT'] = 600
    else:
        USER_SETTINGS['LLM_OUT_LIMIT'] = 1500

    # BN_RL is parsed to a real bool by customized_meta_settings
    if USER_SETTINGS['BN_RL']:
        st_time = time.time()
        fine_tuner = EncoderFinetuner(USER_SETTINGS, model_config)
        print('\tfine tunning model initialized, {}s consumed.'.format(np.round(time.time()-st_time, 2)))

        st_time = time.time()  # BUG FIX: the timer originally started *after* the work it reports on
        # NOTE(review): both reasoners point at marker_learner; sequence_reasoner
        # looks like it should use a sequence learner — confirm against CBD_Memory.
        sequence_reasoner = opt_memory.marker_learner
        marker_reasoner = opt_memory.marker_learner
        print('\treasoner model initialized, {}s consumed.'.format(np.round(time.time()-st_time, 2)))
    else:
        fine_tuner = None
        marker_reasoner = None
        sequence_reasoner = None

    clean_file(USER_SETTINGS['MATCH_DF'], mode='clean', cols=match_df_cols)
    llm_histories = []
    train_multiplier = 1

    if USER_SETTINGS['USE_STOPWORDS']:
        stw_path = os.path.join(USER_SETTINGS['LOCAL_MODELS_DIR'], USER_SETTINGS['STOP_WORDS'])
        with open(stw_path, 'r', encoding='utf-8') as file:
            stopwords = set(line.strip() for line in file)
    else:
        stopwords = None

    print('\tcurrent embedding model: {}'.format(USER_SETTINGS['LOCAL_ENCODER']))
    return KB, KB_PATH, USER_SETTINGS, model_config, llm_apis, llm_histories, model, tokenizer, local_llm, local_llm_tz, vectorize_texts, opt_memory, fine_tuner, sequence_reasoner, marker_reasoner, train_multiplier, stopwords, rerank_m3



'''
    generate global variables based on user settings
'''
# Module-level bootstrap: runs on *import* and builds the per-user runtime
# (settings, models, KB vectors).  NOTE(review): heavy side effects at import
# time — consider moving this into a main()/init() eventually.
print('<阡陌--Checkerboard  beta_2.1> 启动中...')

# Hard-coded user id — presumably a demo/default account; confirm before release.
user = '1245616416310234'
parent = None  # None -> KB folders resolve relative to '..'
print('\t当前用户：', user)

KB, KB_PATH, USER_SETTINGS, model_config, llm_apis, llm_histories, model, tokenizer, local_llm, local_llm_tz, vectorize_texts, opt_memory, fine_tuner, sequence_reasoner, marker_reasoner, train_multiplier, stopwords, rerank_m3 = settings_(user, parent)

# Parse/encode the whole knowledge base into path vectors, content vectors
# and the contents dataframe (plus table/image records).
USER_SETTINGS, full_path_vectors, full_path_ref, all_contents_df, all_vec, tb_record, img_record = encode_user_kb(KB_PATH, 
                                                                                                                    USER_SETTINGS, 
                                                                                                                    vectorize_texts, 
                                                                                                                    tokenizer, 
                                                                                                                    model,
                                                                                                                    know_df_cols, 
                                                                                                                    all_df_cols,
                                                                                                                    stopwords)
        
if not all_contents_df.empty:
    # all_contents_df.loc[len(all_contents_df)] = ['NULL', USER_SETTINGS['KB_PATH']+'-->null_path', 'PTXT', len('NULL'), 'NONE_KEYWORDS', 'NONE_SUMMARY', 'NONE_ID', 'NONE_TOKENS']
    # all_vec = np.vstack((all_vec, np.zeros((1, all_vec.shape[-1]), dtype=np.float32)))
    # NOTE(review): mixed element types — most entries are token *lists*, but
    # NaN rows become the literal *string* 'NULL'; confirm downstream handles both.
    all_contents_tokens = [t.split('->') if not pd.isna(t) else 'NULL' for t in all_contents_df['tokens'].tolist()]
else:
    all_contents_tokens = [ ]
    
# Flatten the path reference: ordered path list + per-path token lists.
full_paths =list(full_path_ref.keys())
full_path_tokens = [d['tokens'].split('->') for d in full_path_ref.values()]

# gen_corpus(USER_SETTINGS)
# gen_query_data = gen_queries(all_contents_df, llm_apis, model_config, USER_SETTINGS, frac=0.2, content_cut=30)

print('<阡陌--Checkerboard  beta_2.1> 启动完成...')


if __name__ == "__main__":
    # Manual maintenance entry point: wipes the current KB (destructive!).
    # The commented sections below are scratch workflows (query generation,
    # finetune-data preparation, encoder finetuning, RFI generation) kept for
    # reference — they rely on globals built by the module bootstrap above.
    clean_kb(USER_SETTINGS)
   
    # gen_queries(all_contents_df, llm_apis, model_config, USER_SETTINGS, frac=1, content_cut=200)  
    # gen_query_data = load_gen_queries(USER_SETTINGS)
    # gen_train_data_from_queries(gen_query_data, all_contents_df, full_path_vectors, all_vec, tokenizer, model, vectorize_texts, USER_SETTINGS)
    # gen_train_data_from_contents(all_contents_df, full_path_vectors, all_vec, tokenizer, model, vectorize_texts, USER_SETTINGS)
    
    # with open(USER_SETTINGS['TRAIN_DATA_GEN_QUERIES'], 'r', encoding='utf-8') as file:
    #     query_len = []
    #     passage_len = []
    #
    #     for obj in file:
    #         # print(obj)
    #         sample = json.loads(obj)
    #
    #         if sample['query']==None:
    #             continue
    #         else:
    #             with open(r'./finetune_data_clean.jsonl', 'a', encoding='utf-8') as f:
    #                 f.write(json.dumps(obj, ensure_ascii=False) + '\n')
    #
    #             query_len.append(len(sample['query']))
    #             for p in sample['pos']:
    #                 passage_len.append(len(p))
    #
    #             for p in sample['neg']:
    #                 passage_len.append(len(p))
    #
    #     print(np.mean(query_len), ' ', np.mean(passage_len))
    
    # finetune_mode = 'use content'
    # finetune_source = 'queries'
    # e = EncoderFinetuner(USER_SETTINGS, model_config, source=finetune_source, mode=finetune_mode)
    # fintune_model, fintune_tokenizer, logger = e.model_setting()
    # e.model_finetuning()
    # e.model_fusing()

    
    # gen_mat = "5.2.8 对于成套供应的柜、台、箱的内部接线，一般由制造商完成。规定指的是柜、台、箱的二次回路接线和自制配电箱（盘）的配线要求。当这种设备用于因设计变更而进行的施工现场时，适用于柜、台、箱的二次回路接线的修改。规定电流回路的导线截面积不得小于2.5 mm²。'"
    # print('\npreparing RFI:\n')
    # query_gen, _ = use_llm_api(llm_apis['qwen_api'],
    #                             histories=[],
    #                             paras={'task':'gen-ques', 
    #                                    'query':'', 
    #                                    'texts':gen_mat,
    #                                    'model':'plus'},
    #                             config=model_config,
    #                             settings=USER_SETTINGS,
    #                             record_his=False)
    #
    # print('\nthe generated RFI is \n{}'.format(query_gen))

    print()









