from CyberU import *

import json
# ---- run configuration ----------------------------------------------------
# (chunk_length, overlap) used when slicing documents into model prompts.
# Must be assigned before it is interpolated into the backup-file path below:
# the original referenced split_scale five lines before defining it.
# split_scale=(3000,400)
split_scale = (1800, 800)

file_str_suffix = '_backup2'
f = f_out = rtxt(f'./content/qac/v1/data_{split_scale[0]}_{split_scale[1]}{file_str_suffix}.jsonl')

# Marker strings gather_qa_data() scans for to split model output into QA pairs.
lis_q = ['提问：', '问题：', "question：", '**Question:**', '**question:**', 'Q:']
# '**Ansmax_wer:**' was a mangled '**Answer:**' (botched search/replace);
# it could never match model output, so those answers were silently dropped.
lis_a = ['回答：', "answer：", '**Answer:**', '**answer:**', 'A:']

document_root = './content'
# NOTE(review): extract_paths is defined further down this script, so this
# module-level call raises NameError unless CyberU's star import supplies a
# same-named helper — confirm, or move the function definitions above this line.
paths = extract_paths(root=document_root)
qac_pairs_path = f'./content/qac//v1/data_{split_scale[0]}_{split_scale[1]}.jsonl'
_ = []


def merge_qa_query(s):
    """Wrap a documentation excerpt in the QA-generation prompt template."""
    header = '以下一个地球化学领域机器学习的部分 doc 文档手册的内容：\n'
    footer = '\n现在，根据以上内容生成一个 qa 对，第一段是提问，第二段是回答。如下：\n'
    return f'{header}{s}{footer}'
def dumps(d):
    """JSON-encode *d* without escaping non-ASCII characters."""
    encoder = json.JSONEncoder(ensure_ascii=False)
    return encoder.encode(d)
def consume(func):
    """Decorator: print how long each call to *func* takes, pass the result through.

    Adds functools.wraps so the decorated function keeps its original
    __name__/__doc__ (the original wrapper discarded them).
    """
    import functools
    import time

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        print(f"{func.__name__} 执行 {end_time - start_time} s.")
        return result
    return wrapper

def extract_paths(filters=('md',), root=None):
    """Recursively collect paths of files under *root* whose names end with any suffix in *filters*.

    Fixes in this revision: the mutable-list default is now a tuple, the
    ``root`` parameter is no longer shadowed by the os.walk loop variable,
    and str.endswith with a tuple appends each file at most once (the
    original could append duplicates when several filters matched).
    """
    import os
    suffixes = tuple(filters)
    found = []
    for dirpath, _dirnames, filenames in os.walk(root):
        for filename in filenames:
            if filename.endswith(suffixes):
                found.append(os.path.join(dirpath, filename))
    return found

def md2text(file_path):
    """Read a Markdown file and return its text with image links, <img> lines and blank lines removed.

    The original applied the image-link regex twice, with the first
    result discarded (both calls substituted from ``md_text``); the dead
    duplicate is removed here.
    """
    import re
    with open(file_path, 'r', encoding='utf-8') as file:
        md_text = file.read()
    # Strip Markdown image links: ![alt](url)
    text = re.sub(r'!\[[^\]]*\]\([^\)]+\)', '', md_text)
    # Drop raw <img> tag lines and empty lines.  (Lines from
    # str.split('\n') can never equal '\n', so the original "== '\n'"
    # test was dead code and is omitted.)
    lines = [ln for ln in text.split('\n') if '<img' not in ln and ln != '']
    return '\n'.join(lines).strip()

def split_string_into_fixed_length_chunks(s, chunk_length, overlay_length):
    """Split *s* into chunks of up to *chunk_length* chars, consecutive chunks overlapping by *overlay_length*.

    Raises ValueError when overlay_length >= chunk_length.

    Bug fixed: the original range stopped at ``len(s) - chunk_length``, so
    strings no longer than chunk_length yielded NO chunks (short documents
    were silently dropped) and longer strings lost their tail.  Every
    character of *s* is now covered; the final chunk may be shorter.
    """
    if overlay_length >= chunk_length:
        raise ValueError("Overlay length must be less than chunk length")
    step = chunk_length - overlay_length
    chunks = []
    for start in range(0, len(s), step):
        chunks.append(s[start:start + chunk_length])
        if start + chunk_length >= len(s):
            break  # this chunk already reaches the end of the string
    return chunks

split_document_str = split_string_into_fixed_length_chunks

def out(content_list):
    """Write *content_list* one item per line to temp.txt and open it with the OS default viewer.

    Fix: the non-Windows fallback always ran macOS's ``open``; Linux/BSD
    needs ``xdg-open``, so the platform is now checked explicitly.
    """
    import os
    import subprocess
    import sys
    filename = 'temp.txt'
    with open(filename, 'w', encoding='utf-8') as f:
        # one batched call instead of many tiny writes
        f.writelines(f"{item}\n" for item in content_list)
    if hasattr(os, 'startfile'):       # Windows
        os.startfile(filename)
    elif sys.platform == 'darwin':     # macOS
        subprocess.call(['open', filename])
    else:                              # Linux/BSD
        subprocess.call(['xdg-open', filename])

def read_pickle(file_path):
    """Load and return the pickled object stored at *file_path*; None on any failure."""
    import pickle
    try:
        with open(file_path, 'rb') as handle:
            data = pickle.load(handle)
    except Exception as e:
        print(f"读取pickle文件时出错: {e}")
        return None
    return data


def write_pickle(data, file_path):
    """Serialize *data* to *file_path* with pickle; failures are printed, never raised."""
    import pickle
    try:
        with open(file_path, 'wb') as handle:
            pickle.dump(data, handle)
    except Exception as e:
        print(f"写入pickle文件时出错: {e}")

def yi_online_stream(content, stream=False, url=None):
    """Query the Yi chat-completions endpoint; a generator yielding response text.

    Streaming mode yields incremental delta strings; non-streaming mode
    yields the full message content once.

    Fixes: the *url* argument is honored instead of being unconditionally
    overwritten; the request payload — which was fully commented out, so
    '{}' was POSTed and *content* was ignored — is restored; a stray
    debug ``print(123)`` is removed.
    """
    import json
    import requests
    if url is None:
        url = "http://36.140.172.136:11100/v1/chat/completions"

    headers = {
        "accept": "application/json",
        "Content-Type": "application/json",
    }

    # NOTE(review): model/do_sample/temperature/top_p/max_tokens are left
    # to server defaults — confirm the endpoint accepts their absence.
    payload = {
        "messages": [
            {
                "role": "user",
                "content": content,
            }
        ],
        "stream": stream,
    }
    payload_json = json.dumps(payload)

    if stream:
        with requests.post(url, headers=headers, data=payload_json, stream=True) as response:
            for chunk in response.iter_content(chunk_size=1024):
                if not chunk:
                    continue
                # strip the SSE "data:" prefix before parsing
                chunk = chunk[5:].decode('utf-8').strip()
                if chunk == '[DONE]':
                    break
                yield json.loads(chunk).get('choices', [])[0].get('delta', {}).get('content', '')
    else:
        response = requests.post(url, headers=headers, data=payload_json)
        yield json.loads(response.text)['choices'][0]['message']['content']


@consume
def yi_online(query=None):
    """Call yi_online_stream until it succeeds; return the joined response text.

    Bug fixed: the retry lived in a ``finally`` block, so the function
    recursed unconditionally — infinite recursion even on success.  The
    retry now happens only when the request actually raised.
    """
    try:
        return ''.join(yi_online_stream(query))
    except Exception as e:
        print(e)
        print('yi 异常，正在重试。。。')
        return yi_online(query=query)

def yi_online2(query=None):
    """POST *query* to the /generate endpoint and return the generated text with the echoed prompt removed.

    On a non-200 response the status code is printed and the process exits.
    """
    import requests
    endpoint = "http://36.140.172.136:60066/generate"
    response = requests.post(endpoint, json={"input_text": query})
    if response.status_code != 200:
        print("请求失败，状态码:")
        print(response.status_code)
        exit()
    generated_text = response.json().get("generated_text", "")
    if query in generated_text:
        generated_text = generated_text.replace(query, '')
    return generated_text

yi = yi_online2


def gather_qa_data(s):
    """Parse raw LLM output *s* into a list of {'question': …, 'answer': …} dicts.

    A line containing any marker from the module-level ``lis_q`` flushes
    the in-progress pair and starts a new question; a line containing any
    ``lis_a`` marker switches accumulation to the answer field.  Marker
    lines themselves are included in the accumulated text.

    Bug fixed: the flush/reset was nested inside the marker loop, so a
    line matching several question markers appended the pair multiple
    times; any() now triggers the flush at most once per line.
    """
    print('生成内容长度：', len(s))
    print('******************generated begin')
    print(s)
    print('******************end')
    ret = []
    question = answer = status = ''
    for p in s.split('\n'):
        if any(marker in p for marker in lis_q):
            ret.append({'question': question, 'answer': answer})
            status = 'question'
            question = answer = ''
        if any(marker in p for marker in lis_a):
            status = 'answer'
        if status == 'question':
            question += p + '\n'
        if status == 'answer':
            answer += p + '\n'
    ret.append({'question': question, 'answer': answer})
    # ret[0] is the (empty) accumulator from before the first marker.
    return ret[1:]

raw2qa_dicts = gather_qa_data

def gpt_autom():
    """Drive the GPT web UI through the CyberU screen-automation helpers."""
    print(pic_path())
    recognizer_image = pic_path('web_gpt_3-5_recoginize')
    switching_apps(pic_path=recognizer_image)

# chat=gpt_autom
chat = gpt_communication

def generate_qac():
    """Chunk every document in ``paths``, ask the model for QA pairs, and append one JSON record per chunk to ``qac_pairs_path``.

    Returns the list of all records produced.  Improvements: the output
    file is handled with ``with`` (the manual open/close leaked on any
    exception), ``id`` no longer shadows the builtin, and enumerate
    replaces the O(n) ``paths.index`` lookup inside the loop.
    """
    from tqdm import tqdm
    record_id = 0
    ret = []
    for path_idx, path in enumerate(tqdm(paths)):
        print(path_idx, path)
        for splitted_content in split_document_str(md2text(path), *split_scale):
            print(record_id, ' using')
            generate_qa_raw = yi(merge_qa_query(splitted_content))
            generate_qa = gather_qa_data(generate_qa_raw)
            # NOTE(review): '\\docs' assumes Windows path separators in
            # *path* — confirm behavior on POSIX systems.
            formatted_dict = {
                'id': record_id,
                'source': path[path.find('\\docs'):],
                'content': splitted_content,
                'qa': generate_qa,
            }
            ret.append(formatted_dict)
            # Append immediately so progress survives a crash mid-run.
            with open(qac_pairs_path, 'a', encoding='utf-8') as file:
                file.write(dumps(formatted_dict) + '\n')
            record_id += 1
            print(record_id)
    return ret

def validate(l):
    """Return True iff *l* is a non-empty QA list in which every pair has a non-empty question and answer."""
    if l == []:
        return False
    return all(qa['question'] != '' and qa['answer'] != '' for qa in l)

def add_qas():
    """Placeholder — not implemented; referenced only from commented-out code in regenerate_qac."""
    pass

def regenerate_qac():
    """Re-run QA generation for every record in ``qac_pairs_path`` whose 'qa' list fails validate(), rewriting the file in place.

    Fixes: the input file handle is now closed (``with``), the undefined
    helpers ``dicttojson``/``json2dict`` are replaced by the module's own
    json tooling, the non-canonical ``encoding='utf'`` is normalized, and
    the unused ``jl`` binding is dropped.
    """
    import json
    with open(qac_pairs_path, 'r', encoding='utf-8') as file:
        contents = file.readlines()
    for index, line in enumerate(contents):
        d = json.loads(line)
        if validate(d['qa']):
            continue
        print(f'检测到{d["id"]}异常')
        generate_qa_raw = chat(content=merge_qa_query(d['content']))
        d['qa'] = gather_qa_data(generate_qa_raw)
        contents[index] = dumps(d)
        # Rewrite the whole file after each repair so progress survives a crash.
        with open(qac_pairs_path, 'w', encoding='utf-8') as f:
            for raw in contents:
                f.write(dumps(json.loads(raw)) + '\n')

