import argparse
import os
import json
import logging
from typing import List, Tuple, Union, Dict, Any
from itertools import islice

import pandas as pd
from dotenv import load_dotenv
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
from langchain.chains import LLMChain
from tqdm import tqdm
from datasets import load_dataset

from instruction_re.utils.data_utils import get_data_config

# Set up logging
# Root-logger config: INFO level, timestamped "time - level - message" lines.
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)

# Load environment variables
# Reads a local .env file (if present) into os.environ before CONFIG is built,
# so API_KEY below can come from the .env file.
load_dotenv()

# Configuration
# Shared LLM settings consumed by SentenceGenerator.__init__.
CONFIG = {
    "model_name": "gpt-3.5-turbo",
    "temperature": 0,  # deterministic sampling for reproducible augmentation
    "api_key": os.environ.get("API_KEY"),  # None if unset; ChatOpenAI will fail at call time
    "base_url": "https://one.aiskt.com/v1",  # OpenAI-compatible proxy endpoint
    "batch_size": 32,  # Number of rows to process in each batch
}

# Prompt template
# Two placeholders are filled per request: {tuples} (a comma-separated list of
# formatted triples/quintuples) and {num_sentences} (always 1 in this script;
# see generate_sentences_from_tuples_batch). The text itself is part of the
# model prompt and must not be edited casually — parse_result assumes the
# model replies with a numbered list, one sentence per line.
TEMPLATE = """
    

# Enhanced Triple-to-Text Generation Prompt

You are an expert linguist and creative writer tasked with generating sophisticated sentences based on given information. The information is provided as a list of triples or quintuples. Your goal is to create {num_sentences} diverse, nuanced, and engaging sentences that accurately represent ALL the information provided in EACH triple/quintuple, while avoiding simplistic or shallow representations.

Given information:
{tuples}

Please generate {num_sentences} sentences based on this information. Each sentence should:

1. Incorporate ALL elements from ALL the provided triples/quintuples.
2. Be grammatically correct and natural-sounding.
3. Demonstrate significant variety in structure, complexity, and vocabulary from other generated sentences.
4. Aim for sophistication and depth. Create sentences that go beyond surface-level descriptions, delving into implications, contexts, or deeper meanings of the relationships described in the triples/quintuples.
5. Utilize advanced linguistic techniques such as:
   - Complex sentence structures (e.g., compound-complex sentences)
   - Subordinate clauses
   - Appositive phrases
   - Participle phrases
   - Metaphors or analogies when appropriate
   - Idiomatic expressions related to the concepts
6. Reframe the relationships in creative ways. For example, instead of saying "X is based in Y," consider phrases like "X has established its roots in Y," "Y serves as the backdrop for X's global operations," or "X's presence is woven into the fabric of Y's business landscape."
7. Incorporate relevant context or background information that enriches the understanding of the relationship, without introducing unsubstantiated facts.
8. Consider the broader implications or significance of the relationship described in the triple/quintuple.
9. Vary the focus or perspective of each sentence. Some might emphasize the subject, others the object, and others the relationship itself.
10. Use diverse and precise vocabulary, especially when describing relationships. Avoid repetitive or generic terms.

Example:
For a triple like (Microsoft, organization based in, America), instead of "Microsoft is an organization based in America," aim for sentences like:

"As a titan of the tech industry, Microsoft has not only shaped the digital landscape globally but has also become an integral part of America's innovation ecosystem, with its roots firmly planted in the fertile soil of U.S. entrepreneurship."

Important: Ensure that EVERY generated sentence contains ALL the information from ALL given triples/quintuples, while maintaining a high level of sophistication and engagement. Do not omit any information from any triple in any sentence.

Provide your response as a numbered list of {num_sentences} sentences. Do not include any additional explanations or comments.
    """


class SentenceGenerator:
    """Generates one natural-language sentence per row of relation tuples using an LLM chain.

    Rows are processed in batches of ``config["batch_size"]`` via ``LLMChain.batch``.
    """

    def __init__(self, config: Dict[str, Any]):
        """Build the prompt, chat model, and chain.

        Args:
            config: dict with keys ``model_name``, ``temperature``, ``api_key``,
                ``base_url``, and ``batch_size`` (see module-level ``CONFIG``).
        """
        self.prompt = PromptTemplate(
            # Bug fix: TEMPLATE references both {tuples} and {num_sentences},
            # and every batch payload supplies both keys. Declaring only
            # "tuples" fails PromptTemplate's template-variable validation
            # (or leaves {num_sentences} unfilled) on current LangChain.
            input_variables=["tuples", "num_sentences"],
            template=TEMPLATE,
        )
        self.llm = ChatOpenAI(
            temperature=config["temperature"],
            model_name=config["model_name"],
            api_key=config["api_key"],
            base_url=config["base_url"],
        )
        self.chain = LLMChain(llm=self.llm, prompt=self.prompt)
        self.batch_size = config["batch_size"]

    @staticmethod
    def format_tuple(
        t: Union[Tuple[str, str, str], Tuple[str, str, str, str, str]]
    ) -> str:
        """Render one triple/quintuple as ``(a, b, c)``."""
        return f"({', '.join(t)})"

    def format_tuple_list(
        self, tuples: List[Union[Tuple[str, str, str], Tuple[str, str, str, str, str]]]
    ) -> str:
        """Render a list of tuples as a single comma-separated string for the prompt."""
        return ", ".join([self.format_tuple(t) for t in tuples])

    def parse_result(self, result: List[Dict[str, Any]]) -> List[str]:
        """Extract one sentence per chain output.

        Only the first line of each completion is kept (we request exactly one
        sentence per prompt), and a leading "1. " style list marker is stripped.
        """
        first_lines = [r["text"].strip().split("\n")[0] for r in result]
        return [
            s.split(". ", 1)[1].strip() if ". " in s else s.strip()
            for s in first_lines
        ]

    def generate_sentences_from_tuples_batch(
        self,
        tuples_batch: List[
            List[Union[Tuple[str, str, str], Tuple[str, str, str, str, str]]]
        ],
    ) -> List[str]:
        """Generate one sentence for each inner tuple-list via a single batched LLM call.

        Raises:
            Exception: re-raised after logging if the chain call or parsing fails.
        """
        try:
            formatted_tuples_batch = [
                {"tuples": self.format_tuple_list(tuples), "num_sentences": 1}
                for tuples in tuples_batch
            ]
            result = self.chain.batch(formatted_tuples_batch)
            return self.parse_result(result)
        except Exception as e:
            logging.error(f"Error in generate_sentences_from_tuples_batch: {e}")
            raise

    def generate_sentences_for_df(self, df: pd.DataFrame) -> pd.DataFrame:
        """Add a ``sentence`` column to ``df``, generated from its ``relations`` column.

        Each row's ``relations`` is expected to be a list of dicts with
        ``head``/``tail`` entities (each with ``name`` and ``type``) and a
        relation ``type``. The DataFrame is mutated in place and also returned.
        """

        def process_batch(batch: pd.DataFrame) -> List[str]:
            # One quintuple per relation: (head name, head type, relation, tail name, tail type).
            tuples_batch = [
                [
                    (
                        d["head"]["name"],
                        d["head"]["type"],
                        d["type"],
                        d["tail"]["name"],
                        d["tail"]["type"],
                    )
                    for d in row.relations
                ]
                for _, row in batch.iterrows()
            ]
            return self.generate_sentences_from_tuples_batch(tuples_batch)

        sentences = []
        for i in tqdm(range(0, len(df), self.batch_size), desc="Generating sentences"):
            batch = df.iloc[i : i + self.batch_size]
            sentences.extend(process_batch(batch))

        df["sentence"] = sentences
        return df


def filter_data(data: Dict[str, Any]) -> bool:
    """Return True iff every entity name in ``data["relations"]`` appears verbatim in the sentence."""
    sentence = data["sentence"]
    entity_names = {
        relation[endpoint]["name"]
        for relation in data["relations"]
        for endpoint in ("head", "tail")
    }
    return all(name in sentence for name in entity_names)


def remove_pos_attr(data: Dict[str, Any]) -> Dict[str, Any]:
    """Return a slimmed copy of ``data`` keeping only sentence text and name/type relation fields."""

    def slim_entity(entity: Dict[str, Any]) -> Dict[str, Any]:
        # Keep only the two fields downstream consumers need; drops e.g. position attrs.
        return {"name": entity["name"], "type": entity["type"]}

    slim_relations = []
    for relation in data["relations"]:
        slim_relations.append(
            {
                "head": slim_entity(relation["head"]),
                "type": relation["type"],
                "tail": slim_entity(relation["tail"]),
            }
        )
    return {"sentence": data["sentence"], "relations": slim_relations}


def main(args: argparse.Namespace) -> None:
    """Run the augmentation pipeline: generate LLM sentences for each rule file,
    filter rows whose sentence omits an entity, dedupe, merge with the origin
    train set, strip positional attributes, and write the merged dataset.

    Args:
        args: parsed CLI arguments (see the argparse setup at the bottom of
            this file): dataset_name, root_data_dir, rules, aug_times,
            is_few_shot, few_shot_num, save_dir_name.
    """
    dataset_name = args.dataset_name
    is_few_shot = args.is_few_shot
    few_shot_num = args.few_shot_num
    base_dir = f"{args.root_data_dir}/{dataset_name}"
    save_dir = f"{args.root_data_dir}/{dataset_name}/{args.save_dir_name}"
    os.makedirs(save_dir, exist_ok=True)

    # Path of the original (non-augmented) train split, resolved by the
    # project-local helper. NOTE(review): assumes the returned config dict
    # always has a "train" key — confirm against get_data_config.
    origin_train_path = get_data_config(
        args.root_data_dir,
        dataset_name,
        is_few_shot=is_few_shot,
        few_shot_num=few_shot_num,
    )["train"]

    # Persist the CLI arguments alongside the outputs for reproducibility.
    with open(f"{save_dir}/args.json", "w") as f:
        json.dump(vars(args), f, ensure_ascii=False, indent=4)

    logging.info(f"Origin train path: {origin_train_path}")
    logging.info(f"Arguments: {args}")

    # Directory holding one JSON-lines file per augmentation rule.
    # few_shot_num == 0 is treated as the "10% few-shot" setting.
    if is_few_shot:
        if few_shot_num != 0:
            root_data_dir = f"{base_dir}/few_shot_{few_shot_num}_{args.aug_times}"
        else:
            root_data_dir = f"{base_dir}/few_shot10%_{args.aug_times}"
    else:
        root_data_dir = f"{base_dir}/full_data_{args.aug_times}"
    generate_llm_dir = save_dir
    final_merged_path = f"{save_dir}/merge_train_dataset.json"

    merged_df_list = []
    # Whitespace-separated rule names; only files matching these are processed.
    rules = args.rules.split()

    # Extra rows read per rule, as headroom for rows lost to filtering/dedup.
    # NOTE(review): the 100/150 special-casing both falls back to 20 —
    # presumably tuned empirically; confirm the intended values.
    if not is_few_shot:
        add_num = 50
    else:
        add_num = 20 if few_shot_num == 100 else 30 if few_shot_num == 150 else 20
    # NOTE(review): lines= is given the int is_few_shot flag, implying the
    # few-shot origin file is JSON-lines and the full file is a JSON array —
    # confirm the on-disk formats.
    origin_train_dataset = pd.read_json(origin_train_path, lines=args.is_few_shot)

    raw_count = origin_train_dataset.shape[0]
    # Target number of augmented rows overall; split evenly across rules,
    # each padded by add_num.
    aug_total_num = raw_count * args.aug_times
    per_rule_count = aug_total_num // len(rules) + add_num

    sentence_generator = SentenceGenerator(CONFIG)

    for file in os.listdir(root_data_dir):
        if file.endswith(".json"):
            file_path = os.path.join(root_data_dir, file)
            save_file_name = os.path.splitext(os.path.basename(file_path))[0]

            # Skip rule files not requested via --rules.
            if save_file_name not in rules:
                continue

            df = pd.read_json(file_path, lines=True, nrows=per_rule_count)
            # TODO: temporarily duplicating part of the df; needs revision later.
            # Pad df with its first 100 rows repeated.
            df = pd.concat([df, df.head(100)], ignore_index=True)
            df = sentence_generator.generate_sentences_for_df(df)

            # Keep only rows whose generated sentence mentions every entity.
            df = df[df.apply(filter_data, axis=1)]
            merged_df_list.append(df)

            os.makedirs(generate_llm_dir, exist_ok=True)
            df.to_json(
                f"{generate_llm_dir}/{save_file_name}.json",
                orient="records",
                lines=True,
            )
            logging.info(f"Generated sentences for {file_path}")

    merged_df = pd.concat(merged_df_list, ignore_index=True)

    logging.info(f"Before deduplication: {merged_df.shape}")
    merged_df = merged_df.drop_duplicates(subset=["sentence"])
    logging.info(f"After deduplication: {merged_df.shape}")

    # Cap augmented rows at the target count, then append the origin train set.
    final_merged_df = pd.concat(
        [merged_df.head(aug_total_num), origin_train_dataset],
        ignore_index=True,
    )

    # Row-wise slimming; yields a Series of dicts serialized as JSON lines.
    final_merged_df = final_merged_df.apply(remove_pos_attr, axis=1)

    final_merged_df.to_json(
        final_merged_path,
        orient="records",
        lines=True,
    )
    logging.info(f"Final merged dataset saved to {final_merged_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Data Processing and Sentence Generation Script"
    )

    # (flag, add_argument keyword arguments) — one entry per CLI option.
    argument_specs = [
        ("--dataset_name", {"type": str, "default": "conll04", "help": "dataset name"}),
        (
            "--root_data_dir",
            {
                "type": str,
                "default": "/home/wangxiaoli/datasets/IE_INSTRUCTIONS/RE",
                "help": "root data directory",
            },
        ),
        (
            "--rules",
            {
                "type": str,
                "default": "add_triple origin remove_triple replace_same_type_entity swap_triple",
                "help": "rules for dataset enhancement",
            },
        ),
        ("--aug_times", {"type": int, "default": 1, "help": "data augmentation multiplier"}),
        ("--is_few_shot", {"type": int, "default": 1, "help": "whether it's few shot"}),
        ("--few_shot_num", {"type": int, "default": 50, "help": "number of few shot samples"}),
        ("--save_dir_name", {"type": str, "help": "folder name for saving synthetic data"}),
    ]
    for flag, options in argument_specs:
        parser.add_argument(flag, **options)

    args = parser.parse_args()
    print(args)
    main(args)
