# %%

import argparse
import json
import os
import re
from typing import Any, Dict, List, Tuple, Union

import pandas as pd
from dotenv import load_dotenv
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

from instruction_re.utils.data_utils import get_data_config

# Pull environment variables (e.g. API_KEY) from a local .env file.
load_dotenv()

# Endpoint of the OpenAI-compatible API gateway used by this script.
BASE_URL = "https://one.aiskt.com/v1"

# Shared runtime configuration consumed by the generation helpers below.
params = {
    "model_name": "gpt-3.5-turbo",
    "temperature": 0,
    "api_key": os.environ.get("API_KEY"),
    "base_url": BASE_URL,
}


def generate_sentences_from_tuples(
    tuples: List[Union[Tuple[str, str, str], Tuple[str, str, str, str, str]]],
    num_sentences: int,
    api_key: str,
    model_name: str = "gpt-3.5-turbo",
    temperature: float = 0.7,
    base_url: Union[str, None] = None,
) -> List[str]:
    """
    Generate sentences from a list of triples or quintuples using LangChain and OpenAI.

    :param tuples: A list of tuples, each either (subject, predicate, object) or
                   (head_type, head, relation, tail, tail_type); other arities are
                   formatted the same way instead of being silently dropped
    :param num_sentences: Number of sentences to generate
    :param api_key: OpenAI API key
    :param model_name: Name of the OpenAI model to use
    :param temperature: Sampling temperature for the chat model (default 0.7,
                        the value that was previously hard-coded here)
    :param base_url: API base URL; falls back to the module-level
                     ``params["base_url"]`` when None
    :return: A list of generated sentences
    """
    # Render every tuple uniformly as "(a, b, c)".  The previous version used
    # inconsistent spacing for triples vs. quintuples and silently dropped any
    # tuple whose length was not exactly 3 or 5.
    tuples_str = "[" + ", ".join(
        "(" + ", ".join(str(part) for part in t) + ")" for t in tuples
    ) + "]"

    template = """
    

# Enhanced Triple-to-Text Generation Prompt

You are an expert linguist and creative writer tasked with generating sophisticated sentences based on given information. The information is provided as a list of triples or quintuples. Your goal is to create {num_sentences} diverse, nuanced, and engaging sentences that accurately represent ALL the information provided in EACH triple/quintuple, while avoiding simplistic or shallow representations.

Given information:
{tuples}

Please generate {num_sentences} sentences based on this information. Each sentence should:

1. Incorporate ALL elements from ALL the provided triples/quintuples.
2. Be grammatically correct and natural-sounding.
3. Demonstrate significant variety in structure, complexity, and vocabulary from other generated sentences.
4. Aim for sophistication and depth. Create sentences that go beyond surface-level descriptions, delving into implications, contexts, or deeper meanings of the relationships described in the triples/quintuples.
5. Utilize advanced linguistic techniques such as:
   - Complex sentence structures (e.g., compound-complex sentences)
   - Subordinate clauses
   - Appositive phrases
   - Participle phrases
   - Metaphors or analogies when appropriate
   - Idiomatic expressions related to the concepts
6. Reframe the relationships in creative ways. For example, instead of saying "X is based in Y," consider phrases like "X has established its roots in Y," "Y serves as the backdrop for X's global operations," or "X's presence is woven into the fabric of Y's business landscape."
7. Incorporate relevant context or background information that enriches the understanding of the relationship, without introducing unsubstantiated facts.
8. Consider the broader implications or significance of the relationship described in the triple/quintuple.
9. Vary the focus or perspective of each sentence. Some might emphasize the subject, others the object, and others the relationship itself.
10. Use diverse and precise vocabulary, especially when describing relationships. Avoid repetitive or generic terms.

Example:
For a triple like (Microsoft, organization based in, America), instead of "Microsoft is an organization based in America," aim for sentences like:

"As a titan of the tech industry, Microsoft has not only shaped the digital landscape globally but has also become an integral part of America's innovation ecosystem, with its roots firmly planted in the fertile soil of U.S. entrepreneurship."

Important: Ensure that EVERY generated sentence contains ALL the information from ALL given triples/quintuples, while maintaining a high level of sophistication and engagement. Do not omit any information from any triple in any sentence.

Provide your response as a numbered list of {num_sentences} sentences. Do not include any additional explanations or comments.
    """

    prompt = PromptTemplate(
        input_variables=["num_sentences", "tuples"],
        template=template,
    )

    # Initialize the language model.  temperature used to be hard-coded to
    # 0.7 here (ignoring params["temperature"]); it is now a parameter with
    # the same default, so existing callers see identical behavior.
    llm = ChatOpenAI(
        temperature=temperature,
        model_name=model_name,
        api_key=api_key,
        base_url=base_url if base_url is not None else params["base_url"],
    )

    # Create the LLMChain
    chain = LLMChain(llm=llm, prompt=prompt)

    # Generate the sentences
    result = chain.run(num_sentences=num_sentences, tuples=tuples_str)

    # Parse the model's numbered list.  Blank separator lines are skipped
    # (the old `split(". ", 1)` approach kept them as empty strings), and
    # leading numbering in either "1. " or "1)" form is stripped.
    sentences: List[str] = []
    for line in result.strip().split("\n"):
        line = line.strip()
        if not line:
            continue
        match = re.match(r"^\d+\s*[.)]\s*(.*)", line)
        sentences.append(match.group(1).strip() if match else line)

    return sentences


def generate_sentences_for_df(
    df: pd.DataFrame,
    generate_sentences_from_tuples: callable,
    params: Dict[str, Any],
) -> pd.DataFrame:
    """
    Generate one sentence for every row of *df* and store it in a new
    ``sentence`` column (the DataFrame is modified in place and returned).

    :param df: input DataFrame; each row must carry a ``relations`` list of
               dicts shaped like ``{"head": {"name", "type"}, "type",
               "tail": {"name", "type"}}``
    :param generate_sentences_from_tuples: sentence-generation callable taking
               (tuples, num_sentences=..., api_key=..., model_name=...)
    :param params: dict supplying ``api_key`` and ``model_name``
    :return: the same DataFrame with the ``sentence`` column added
    """

    def process_row(row: pd.Series) -> str:
        # NOTE(review): tuple order here is (head_name, head_type, relation,
        # tail_name, tail_type), while the generator's docstring documents
        # (head_type, head, relation, tail, tail_type).  The tuples are only
        # string-formatted downstream, so this is cosmetic — but confirm the
        # intended ordering.
        quintuples = [
            (
                rel["head"]["name"],
                rel["head"]["type"],
                rel["type"],
                rel["tail"]["name"],
                rel["tail"]["type"],
            )
            for rel in row.relations
        ]
        sentences = generate_sentences_from_tuples(
            quintuples,
            num_sentences=1,
            api_key=params["api_key"],
            model_name=params["model_name"],
        )
        # Only the first generated sentence is used per row.
        return sentences[0]

    df["sentence"] = df.apply(process_row, axis=1)
    return df


# Dead code: earlier top-level argparse setup, superseded by the parser
# built inside the __main__ block below.
# import argparse
# parser = argparse.ArgumentParser()


# parser.add_argument(
#     "--dataset_name",
#     type=str,
#     default="SciERC",
#     help="dataset name",
# )
# Filter out unqualified (malformed) generated rows
def filter_data(data):
    """Return True iff every entity named in ``data["relations"]`` appears
    verbatim (as a substring) in the generated ``data["sentence"]``."""
    entities = {
        name
        for rel in data["relations"]
        for name in (rel["head"]["name"], rel["tail"]["name"])
    }
    # A generated sentence is kept only when it mentions all entities.
    sentence = data["sentence"]
    return all(entity in sentence for entity in entities)


def remove_pos_attr(data):
    """Rebuild a record keeping only ``name``/``type`` on each relation's
    head and tail, i.e. drop the ``pos`` attribute from the dataset."""

    def _slim(entity):
        # Keep only the two attributes downstream training needs.
        return {"name": entity["name"], "type": entity["type"]}

    cleaned = [
        {"head": _slim(rel["head"]), "type": rel["type"], "tail": _slim(rel["tail"])}
        for rel in data["relations"]
    ]
    return {"sentence": data["sentence"], "relations": cleaned}


if __name__ == "__main__":
    # Entry point: reads per-rule augmented relation files, verbalizes each
    # row into a sentence via the LLM, filters rows whose sentence misses an
    # entity, merges the result with the original training set, and saves it.
    # args = parser.parse_args()
    # root_data_dir = "{base_dir}/conll04/few_shot_50"
    # origin_train_path = (
    #     "{base_dir}/conll04/few_shot_50.json"
    # )

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="conll04",
        help="dataset name",
    )

    parser.add_argument(
        "--root_data_dir",
        type=str,
        default="/home/wangxiaoli/datasets/IE_INSTRUCTIONS/RE",
        help="root data directory",
    )

    # help (zh): "rules used to augment the dataset" — space-separated rule
    # names, each expected to match a <rule>.json file under root_data_dir.
    parser.add_argument(
        "--rules",
        type=str,
        help="用于增强数据集的规则",
        default="add_triple origin remove_triple replace_same_type_entity swap_triple",
    )

    # help (zh): "augmentation multiplier"
    parser.add_argument(
        "--aug_times",
        type=int,
        default=1,
        help="数据增强的倍数",
    )

    # help (zh): "whether this is a few-shot run" (0/1 int flag; also reused
    # below as the `lines` argument when reading the original train file)
    parser.add_argument(
        "--is_few_shot",
        type=int,
        default=1,
        help="是否是few shot",
    )

    # help (zh): "number of few-shot examples"
    parser.add_argument(
        "--few_shot_num",
        type=int,
        default=50,
        help="few shot的数量",
    )

    # help (zh): "directory name for saving the synthesized data"
    parser.add_argument(
        "--save_dir_name",
        type=str,
        help="用于辅助合成数据保存的文件夹名",
    )

    args = parser.parse_args()

    dataset_name = args.dataset_name
    # dataset_name = "conll04"
    is_few_shot = args.is_few_shot
    few_shot_num = args.few_shot_num
    base_dir = f"{args.root_data_dir}/{dataset_name}"
    save_dir = f"{args.root_data_dir}/{dataset_name}/{args.save_dir_name}"
    os.makedirs(save_dir, exist_ok=True)

    # Resolve the original training-set path from the project data config.
    origin_train_path = get_data_config(
        args.root_data_dir,
        dataset_name,
        is_few_shot=is_few_shot,
        few_shot_num=few_shot_num,
    )["train"]

    print(origin_train_path)

    # Persist the CLI arguments next to the outputs for reproducibility.
    with open(
        f"{save_dir}/args.json",
        "w",
    ) as f:
        json.dump(vars(args), f, ensure_ascii=False, indent=4)

    print(origin_train_path)

    print(args)

    # return
    # import sys

    # sys.exit(0)

    # root_data_dir = f"{base_dir}/few_shot_{few_shot_num}_{args.aug_times}"
    # root_data_dir = f"{base_dir}/full_data_{args.aug_times}"
    # NOTE(review): the input directory is hard-wired to the "few_shot10%"
    # layout regardless of --few_shot_num — confirm this is intentional.
    root_data_dir = f"{base_dir}/few_shot10%_{args.aug_times}"
    # generate_llm_dir = f"{base_dir}/llm/few_shot_{few_shot_num}"
    generate_llm_dir = save_dir

    final_merged_path = f"{save_dir}/merge_train_dataset.json"

    # merged_df = pd.DataFrame()
    merged_df_list = []
    rules = args.rules.split()

    # Extra rows fetched per rule as slack for rows later removed by filtering.
    add_num = 20

    if few_shot_num == 100:
        add_num = 20
    elif few_shot_num == 150:
        add_num = 30

    # NOTE(review): `lines` receives the int flag is_few_shot (0/1), which
    # pandas treats as a bool — confirm the non-few-shot file is a plain JSON
    # array rather than JSON lines.
    origin_train_dataset = pd.read_json(origin_train_path, lines=is_few_shot)
    origin_count = origin_train_dataset.shape[0]

    aug_total_num = origin_count * args.aug_times
    per_rule_count = aug_total_num // len(rules) + add_num

    # Iterate over all .json files in root_data_dir
    for file in os.listdir(root_data_dir):
        if file.endswith(".json"):
            file_path = os.path.join(root_data_dir, file)

            # Derive the rule name from the file name (without extension)
            file_name = os.path.basename(file_path)
            save_file_name = os.path.splitext(file_name)[0]

            # Only files whose stem matches a requested rule are processed.
            if save_file_name not in rules:
                continue

            df = pd.read_json(file_path, lines=True, nrows=per_rule_count)
            df = generate_sentences_for_df(
                df,
                generate_sentences_from_tuples,
                params,
            )

            # Drop rows whose generated sentence does not mention every entity
            df = df[df.apply(filter_data, axis=1)]
            # df = df.apply(remove_pos_attr, axis=1)

            merged_df_list.append(df)

            os.makedirs(
                generate_llm_dir,
                exist_ok=True,
            )

            # Save the per-rule augmented data as JSON lines.
            df.to_json(
                f"{generate_llm_dir}/{save_file_name}.json",
                orient="records",
                lines=True,
            )
            print(f"Generated sentences for {file_path}")

    merged_df = pd.concat(merged_df_list, ignore_index=True)

    print(merged_df.head())

    # Report row counts before/after dropping duplicate generated sentences.
    print(f"去重前: {merged_df.shape}")
    merged_df = merged_df.drop_duplicates(subset=["sentence"])
    print(f"去重后: {merged_df.shape}")

    # Cap augmented rows at aug_total_num, then append the original data.
    final_merged_df = pd.concat(
        [merged_df.head(aug_total_num), origin_train_dataset],
        ignore_index=True,
    )

    # NOTE(review): apply(..., axis=1) with a dict-returning function yields a
    # Series of dicts, not a DataFrame — confirm to_json(orient="records",
    # lines=True) then emits the format downstream training expects.
    final_merged_df = final_merged_df.apply(remove_pos_attr, axis=1)

    # Save the final merged dataset

    final_merged_df.to_json(
        final_merged_path,
        orient="records",
        lines=True,
    )