import functools
import json
import multiprocessing
import os
import re
import sys
import threading
import time
from concurrent.futures import ThreadPoolExecutor

import numpy as np
import pandas as pd
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer

sys.path.append(os.path.abspath('../Checkerboard'))
# from META import llm_apis, llm_histories, local_llm, local_llm_tz, model_config, USER_SETTINGS, stopwords
# from txt_parser import extact_local_keywords2summary
from utlis import restore_graph_by_paths, extract_nested_dic_vals, process_dup_paths_df, gen_str_codes
from knowledge_generator import process_full_contents


'''
    initial settings
'''
# Path to the raw Wikipedia dump: one JSON object per line (.jsonl).
full_resouce_path = r'D:\datasets\wiki_data.jsonl'
# Root directory holding the per-batch knowledge-base folders (Articles_*).
entry_root = r'D:\datasets\知识固化库_Wiki'

# Top-level namespace prepended to every knowledge path (stripped again by clean_path).
user = 'Wiki'
# Lazily-initialized encoder singletons, populated by get_encoder() on first use.
_model = None
_tokenizer = None


'''
    develop the vector database
'''
def time_eval(func):
    """Decorator that prints the wall-clock duration of each call.

    Fix: apply functools.wraps so the wrapped function keeps its
    __name__/__doc__ (the original wrapper clobbered them).
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        end = time.time()
        print(f"\n⏱️ 用时: {end - start:.2f} 秒")
        return result
    return wrapper


def iter_jsonl(file_path):
    """Lazily yield one parsed JSON object per line of *file_path* (.jsonl)."""
    with open(file_path, 'r', encoding='utf-8-sig') as handle:
        yield from map(json.loads, handle)


def reset():
    """Return eight fresh, independent accumulator lists as a tuple."""
    return tuple([] for _ in range(8))


def clean_path(path_txt):
    """Normalize a '-->'-joined path string.

    Strips a trailing '_<digits>' suffix, then removes the 1st and 3rd
    segments (the user namespace and the 'article <id>' segment) and
    re-joins the rest with ' --> '. A single-segment path is returned
    as-is; an all-empty path yields ''.
    """
    stripped = re.sub(r'_\d+\s*$', '', path_txt.strip())
    segments = [seg.strip() for seg in stripped.split('-->')]
    if not any(segments):
        return ''
    if len(segments) == 1:
        return segments[0]

    # Keep everything except indices 0 and 2.
    kept = segments[1:2] + segments[3:]
    return ' --> '.join(kept) if kept else ''


def postprocess_contents(contents, dic_keys, para_path, max_words=128):
    """Clean raw content strings, split them into <=max_words-word chunks,
    and derive per-chunk metadata.

    Returns (chunks, chunk_count, chunk_word_lengths, chunk_ids) where the
    ids come from gen_str_codes(para_path + chunk).
    """
    def chunkify(text):
        # Sentence-split on terminal punctuation, then window each
        # sentence into groups of at most max_words words.
        pieces = []
        for sentence in re.split(r'(?<=[.?!])\s+', text.strip()):
            words = sentence.strip().split()
            for start in range(0, len(words), max_words):
                piece = ' '.join(words[start:start + max_words])
                if piece:
                    pieces.append(piece)
        return pieces or [text]

    # Drop entries that are empty once the __HHF__ marker is removed.
    kept = [c.strip() for c in contents
            if re.sub('__HHF__', '', c).strip() != '']
    # Remove marker/noise tokens from what survives.
    cleaned = [c.replace('__HHF__', '').replace('Unnamed', '').replace('。', '').strip()
               for c in kept]
    # The key prefix is loop-invariant, so compute it once (same result
    # as the original per-item computation).
    key_prefix = re.sub(r'[.?!]+$', '', '\n'.join(dic_keys))
    prefixed = [key_prefix + ' ' + c for c in cleaned]

    all_chunks = []
    for text in prefixed:
        all_chunks.extend(chunkify(text))

    chunk_kids = [gen_str_codes(para_path + chunk) for chunk in all_chunks]
    chunk_lens = [len(chunk.split()) for chunk in all_chunks]
    return all_chunks, len(all_chunks), chunk_lens, chunk_kids
    

def _save_batch(out_kb_dir, split_char, added_contents, added_paths, added_types,
                added_lengths, added_keywords, added_summaries, added_knowids,
                paths_graph):
    """Write one batch to disk: KB_PTXT.csv (chunk records) plus graph.json
    (the path graph restored from the accumulated paragraph paths).

    Returns the number of CSV rows written.
    """
    os.makedirs(out_kb_dir, exist_ok=True)
    content_df = pd.DataFrame({
        'content': added_contents,
        'path': added_paths,
        'type': added_types,
        'length': added_lengths,
        'keywords': added_keywords,
        'summary': added_summaries,
        'know_id': added_knowids,
    })
    content_df.to_csv(os.path.join(out_kb_dir, 'KB_PTXT.csv'), encoding='utf-8-sig', index=False)

    graph, _ = restore_graph_by_paths(paths_graph, split_char, '__摘要总结__')
    with open(os.path.join(out_kb_dir, 'graph.json'), 'w', encoding='utf-8-sig') as f:
        json.dump(graph, f, ensure_ascii=False, indent=4)
    return len(content_df)


def process_data(user, full_resouce_path, split_char='-->', batch_size=1000, max_words=None):
    """Stream the wiki .jsonl dump, chunk every article section, and write
    per-batch knowledge-base folders (Articles_<n>) under entry_root.

    Parameters
    ----------
    user : str
        Top-level namespace prepended to every path before clean_path().
    full_resouce_path : str
        Path to the .jsonl dump (one article per line).
    split_char : str
        Separator used inside knowledge paths.
    batch_size : int
        Number of articles accumulated before each flush to disk.
    max_words : int or None
        Chunk size forwarded to postprocess_contents. When None, falls back
        to the module-level MAX_WORDS set in the __main__ block (the
        original behavior, which raised NameError outside the script) or
        100 if that global is absent.
    """
    if max_words is None:
        # Backward-compatible fallback: the script defines MAX_WORDS in
        # __main__; previously this function read it as a bare global.
        max_words = globals().get('MAX_WORDS', 100)

    # First pass only counts lines so progress can be reported.
    with open(full_resouce_path, "r", encoding="utf-8-sig") as f:
        total_lines = sum(1 for _ in f)

    count_ = 0
    batch_id = 0
    added_paths, added_contents, added_types, added_knowids, added_lengths, added_keywords, added_summaries, paths_graph = reset()

    for data in iter_jsonl(full_resouce_path):
        wiki_id = data["wikipedia_id"]
        category = data['categories']
        title = data['wikipedia_title']
        data_content = '。'.join(data["text"])
        # Articles are pre-segmented by 'Section::::' markers.
        paras = data_content.split('Section::::')

        for j, para in enumerate(paras):
            # *** consider generate sub-topics ***  we can compare two options
            # The first line of each section is its heading.
            inner_key = para.split('\n')[0]
            para_path = split_char.join([category, ('article ' + wiki_id), title, inner_key])
            paths_graph.append(para_path)
            system_path = clean_path(user + '-->' + para_path)

            know_dic, _ = process_full_contents(para, inner_key)
            dic_keys = [e for e in list(know_dic.keys()) if e != 'UNK']
            contents_ = extract_nested_dic_vals(know_dic)
            contents_, len_, content_lens, know_ids = postprocess_contents(
                contents_, dic_keys, para_path + str(j), max_words)

            # Keywords/summaries are placeholders for now; only plain-text
            # ('PTXT') records are produced for benchmarking.
            keywords = [''] * len_
            summary = [''] * len_
            matches = ['PTXT'] * len_

            added_contents.extend(contents_)
            added_paths.extend([system_path] * len_)
            added_types.extend(matches)
            added_knowids.extend(know_ids)
            added_lengths.extend(content_lens)
            added_keywords.extend(keywords)
            added_summaries.extend(summary)

        count_ += 1
        if count_ % batch_size == 0:
            # Flush the accumulated records every batch_size articles.
            out_kb_dir = os.path.join(entry_root, f'Articles_{batch_id}')
            n_rows = _save_batch(out_kb_dir, split_char, added_contents, added_paths,
                                 added_types, added_lengths, added_keywords,
                                 added_summaries, added_knowids, paths_graph)
            print(f"💾 已保存 batch {batch_id} 👉 共 {n_rows} 条记录")

            # Reset the per-batch accumulators.
            added_paths, added_contents, added_types, added_knowids, added_lengths, added_keywords, added_summaries, paths_graph = reset()
            batch_id += 1

        # Progress report after every article.
        percent = count_ / total_lines * 100
        print(f"✅ 已处理 {count_:,}/{total_lines:,} ({percent:.5f}%)")

    # Flush whatever is left after the final full batch.
    if added_contents:
        out_kb_dir = os.path.join(entry_root, f'Articles_{batch_id}')
        n_rows = _save_batch(out_kb_dir, split_char, added_contents, added_paths,
                             added_types, added_lengths, added_keywords,
                             added_summaries, added_knowids, paths_graph)
        print(f"💾 已保存最后一批 batch {batch_id} 共 {n_rows} 条记录")


'''
    vectorize full dataset
'''
def chunks(lst, n):
    """Yield successive n-sized chunks from lst."""
    start = 0
    while start < len(lst):
        yield lst[start:start + n]
        start += n


def get_encoder(USER_SETTINGS, max_length=512):
    """Return the (tokenizer, model) singletons, loading them on first use.

    Parameters
    ----------
    USER_SETTINGS : dict
        Must provide 'LOCAL_MODELS_DIR' and 'LOCAL_ENCODER' (joined to form
        the local model path).
    max_length : int
        Maximum sequence length applied to the sentence encoder.
    """
    global _model, _tokenizer
    if _model is None or _tokenizer is None:
        print(f"[PID {os.getpid()}] 🧠 Initializing model...", flush=True)
        encoder_path = os.path.join(USER_SETTINGS['LOCAL_MODELS_DIR'], USER_SETTINGS['LOCAL_ENCODER'])
        _tokenizer = AutoTokenizer.from_pretrained(encoder_path, trust_remote_code=True)
        # BUG FIX: previously {'device': 'cuda'} was passed positionally,
        # landing in SentenceTransformer's second parameter (`modules`),
        # not model_kwargs. `device` is a top-level keyword argument.
        _model = SentenceTransformer(encoder_path, device='cuda')
        _model.max_seq_length = max_length
        print(f"[PID {os.getpid()}] ✅ Model initialized", flush=True)
    return _tokenizer, _model


def encode_batch(USER_SETTINGS, texts, batch_size=512, max_len=512):
    """Encode *texts* into a numpy array using the shared sentence encoder.

    max_len caps the encoder's sequence length — lower it on OOM.
    """
    _tok, encoder = get_encoder(USER_SETTINGS, max_length=max_len)
    return encoder.encode(
        texts,
        batch_size=batch_size,
        show_progress_bar=False,
        convert_to_numpy=True,
    )


def save_content_vecs(output_dir, paths, content_vecs):
    """Write paths + content embeddings to vecs_contents.npz in output_dir."""
    target = os.path.join(output_dir, 'vecs_contents.npz')
    np.savez_compressed(target, paths=np.array(paths), content_vecs=content_vecs)


def save_path_vecs(output_dir, paths, path_vecs):
    """Write paths + path embeddings to vecs_paths.npz in output_dir."""
    target = os.path.join(output_dir, 'vecs_paths.npz')
    np.savez_compressed(target, paths=np.array(paths), path_vecs=path_vecs)


def process_folder(folder_path, USER_SETTINGS, file_name='KB_PTXT.csv', batch_size=512):
    """Vectorize one batch folder's CSV and save the embeddings next to it.

    Reads <folder_path>/<file_name>, encodes the 'content' column in
    batches of *batch_size*, and writes paths + vectors to
    vecs_contents.npz in the same folder. Missing or empty CSVs are
    skipped with a log line. Appends a completion line to progress.log.

    NOTE(review): progress.log is appended from multiple worker threads
    without a lock — relies on small appends being effectively atomic.
    """
    thread_id = threading.get_ident()
    data_path = os.path.join(folder_path, file_name)
    if not os.path.exists(data_path):
        print(f"[Thread-{thread_id}] ❌ File not found: {data_path}")
        return
    
    df = pd.read_csv(data_path, encoding='utf-8-sig', index_col=False)
    df_len = len(df)
    if df_len == 0:
        print(f"[Thread-{thread_id}] ⚠️ Empty CSV in {folder_path}, skipped")
        return
    
    all_paths, all_content_vecs, all_path_vecs = [], [], []

    # Process the dataframe in batches of batch_size rows.
    for batch_id, i in enumerate(range(0, df_len, batch_size)):
        batch = df.iloc[i:i+batch_size]
        contents = batch['content'].tolist()
        paths = batch['path'].tolist()

        # Vectorize this batch's contents.
        print(f"[Thread-{thread_id}] 🟡 Start batch {batch_id} in {folder_path} ({i+1}/{df_len})")
        start_time = time.time()

        content_vecs = encode_batch(USER_SETTINGS, contents, batch_size)
        # path_vecs = encode_batch(USER_SETTINGS, paths, batch_size)

        # Accumulate results for a single write at the end.
        all_paths.extend(paths)
        all_content_vecs.append(content_vecs)
        # all_path_vecs.append(path_vecs)
        print(f"[Thread-{thread_id}] ✅ Finished batch {batch_id} | Size: {len(batch)} | Time: {(time.time()-start_time):.2f}s | Progress: {(i+len(batch))/df_len:.2%}")

    all_content_vecs = np.concatenate(all_content_vecs, axis=0)
    assert len(all_paths) == all_content_vecs.shape[0]
    save_content_vecs(folder_path, all_paths, all_content_vecs)
    # all_path_vecs = np.concatenate(all_path_vecs, axis=0)
    # save_path_vecs(folder_path, paths, all_path_vecs)

    # Record completion so interrupted runs can be resumed/audited.
    with open("progress.log", "a", encoding="utf-8") as f:
        f.write(f"{folder_path} done\n")


@time_eval
def run_threaded_parallel(folder_paths, USER_SETTINGS, max_threads):
    """Vectorize every folder using a thread pool; return the failed ones."""
    def worker(folder):
        # A worker returns the folder path on failure (None on success)
        # so failures can be collected and retried by the caller.
        try:
            process_folder(folder, USER_SETTINGS)
        except Exception as e:
            print(f"[ERROR] {folder} failed: {e}")
            return folder
        return None

    with ThreadPoolExecutor(max_workers=max_threads) as pool:
        outcomes = list(pool.map(worker, folder_paths))
    return [folder for folder in outcomes if folder is not None]


@time_eval
def run_serial(folder_paths, user_settings=None):
    """Vectorize folders one at a time; return the list of failed folders.

    Parameters
    ----------
    folder_paths : list of str
        Batch folders to process (each containing KB_PTXT.csv).
    user_settings : dict or None
        Encoder settings; when None, falls back to the module-level
        USER_SETTINGS set in the __main__ block (the original behavior,
        which made this function unusable outside the script).
    """
    if user_settings is None:
        user_settings = USER_SETTINGS  # defined in the __main__ block
    failed = []
    for folder in folder_paths:
        try:
            process_folder(folder, user_settings, batch_size=1)
        except Exception as e:
            # FIX: report the actual error — it was previously swallowed.
            print(f"[Serial] Failed to process {folder}: {e}")
            failed.append(folder)
    return failed


if __name__ == "__main__":
    # --- runtime configuration ---------------------------------------
    # Encoder settings consumed by get_encoder()/process_folder().
    USER_SETTINGS = {'LOCAL_SUMMARY':False, 'LOCAL_MODELS_DIR':r'D:\OneDrive\Code Warehouse\Prototype\Pretrained models', 'LOCAL_ENCODER':'BAAI'}
    MAX_WORDS = 100  # chunk size (words); read as a global by process_data
    ARTICLE_BATCH_SIZE = 50  # articles per Articles_<n> output folder
    SPLIT_CHAR = '-->'  # separator used inside knowledge paths

    VEC_BATCH_SIZE = 256  # NOTE(review): defined but never used below
    MAX_WORKERS = max(2, multiprocessing.cpu_count()//2)  # thread-pool size
    GROUP_NUM = 5  # NOTE(review): defined but never used below

    # Step 1 (run once): preprocess the raw dump into Articles_* folders.
    # print("\n🚀 正在【预处理原始数据】：")
    # process_data(user, full_resouce_path, SPLIT_CHAR, ARTICLE_BATCH_SIZE)


    # Step 2: vectorize every Articles_* batch folder under entry_root.
    folder_paths = [os.path.join(entry_root, dir_) for dir_ in os.listdir(entry_root) if dir_.startswith('Articles_')]
    folder_paths = folder_paths[:20000]  # safety cap on folder count

    # print("\n🔁 正在运行【串行】版本：")
    # serial_failed = run_serial(folder_paths)

    # NOTE(review): the banner says "multi-process" but threads are used.
    print("\n🚀 正在运行【多进程】版本 使用{}个线程：".format(MAX_WORKERS))
    parallel_failed = run_threaded_parallel(folder_paths, USER_SETTINGS, max_threads=MAX_WORKERS)


    # if all_failed:
    #     print(f"\n⚠️ 共失败 {len(all_failed)} 个文件夹，保存至 failed_folders.txt")
    #     with open(os.path.join(entry_root, r"failed_folders.txt"), "w", encoding="utf-8-sig") as f:
    #         for path in all_failed:
    #             f.write(path + "\n")
    # else:
    #     print("\n✅ 所有文件夹处理成功！")


    '''
        load dataset and test
    '''

    # np.save(os.path.join(out_kb_dir, f"wiki_vecs_{batch_id:05d}.npy"), vec_records)

    # kilt_nq = load_dataset("kilt_tasks", "nq", split="validation")

    # for item in tqdm(kilt_nq):
    #     query = item['input']
    #     gold_docs = [p["wikipedia_id"] for p in item["output"][0]["provenance"]]
    #     print(query)