import pandas as pd
import numpy as np
from agent.llm_agent import LLMAgent
from langchain import FAISS, OpenAI
import asyncio
import openai
import json
import time


import json
import tiktoken # for token counting
import numpy as np
from collections import defaultdict
# format={"messages": [{"role": "system", "content": "You are a happy assistant that puts a positive spin on everything."}, {"role": "user", "content": "I fell off my bike today."}, {"role": "assistant", "content": "It's great that you're getting exercise outdoors!"}]}


import os

# SECURITY: a live API key was hard-coded here and committed to source —
# that key must be considered leaked and revoked. Supply the key via the
# OPENAI_API_KEY environment variable (or a secrets manager) instead;
# setdefault keeps any externally provided value untouched.
os.environ.setdefault('OPENAI_API_KEY', '')


def format_check(data_path=''):
    """Validate a chat fine-tuning dataset in JSONL format.

    Each line of the file must be a JSON object of the form
    ``{"messages": [{"role": ..., "content": ...}, ...]}``. Counts every
    format problem found, prints a summary, and returns the counts.

    Args:
        data_path: path to the JSONL dataset file.

    Returns:
        dict mapping error-category name to occurrence count; empty when
        the file is valid. (Also printed, preserving the original output.)

    Raises:
        OSError: if the file cannot be opened.
        json.JSONDecodeError: if a line is not valid JSON.
    """
    # Load the dataset: one JSON object per line.
    with open(data_path, 'r', encoding='utf-8') as f:
        dataset = [json.loads(line) for line in f]

    # Format error checks
    format_errors = defaultdict(int)
    for ex in dataset:
        if not isinstance(ex, dict):
            format_errors["data_type"] += 1
            continue

        messages = ex.get("messages", None)
        if not messages:
            format_errors["missing_messages_list"] += 1
            continue

        for message in messages:
            # Guard: a non-dict entry would otherwise crash the checks below.
            if not isinstance(message, dict):
                format_errors["message_data_type"] += 1
                continue

            if "role" not in message or "content" not in message:
                format_errors["message_missing_key"] += 1

            if any(k not in ("role", "content", "name", "function_call") for k in message):
                format_errors["message_unrecognized_key"] += 1

            if message.get("role", None) not in ("system", "user", "assistant", "function"):
                format_errors["unrecognized_role"] += 1

            content = message.get("content", None)
            function_call = message.get("function_call", None)

            # A message is valid with either string content or a function_call.
            # Fix: content=None alongside a function_call is legal and must not
            # be counted; a present-but-non-string content is still an error.
            if (not content and not function_call) or (
                content is not None and not isinstance(content, str)
            ):
                format_errors["missing_content"] += 1

        if not any(
            isinstance(m, dict) and m.get("role", None) == "assistant"
            for m in messages
        ):
            format_errors["example_missing_assistant_message"] += 1

    if format_errors:
        print("Found errors:")
        for k, v in format_errors.items():
            print(f"{k}: {v}")
    else:
        print("No errors found")

    return dict(format_errors)


def write_to_jsonl(samples, file_path=''):
    """Append each sample to *file_path* as one JSON object per line.

    Args:
        samples: iterable of JSON-serializable objects.
        file_path: destination JSONL file; created if absent, appended to
            if it already exists.
    """
    with open(file_path, 'a', encoding='utf-8') as out:
        out.writelines(
            json.dumps(record, ensure_ascii=False) + "\n" for record in samples
        )


def token_count(content):
    """Return the number of cl100k_base tokens in *content*.

    Uses tiktoken's cl100k_base encoding (the one used by the
    gpt-3.5/gpt-4 chat models).
    """
    return len(tiktoken.get_encoding("cl100k_base").encode(content))


#split_content function
def split_content(content,every_content_length=2500,window_dialogue=2):
    content_len = token_count(content)
    if content_len<=every_content_length:
        return [content]
    else:
        content_num=content_len//every_content_length+1
        dialogue_list=content.split('\n')
        res_content_list=[]
        for i in range(content_num):
            if i>0:
                res_content=res_content[-window_dialogue:]
            else:
                res_content=[]
            while(token_count('\n'.join(res_content))<=every_content_length):
                if len(dialogue_list)==0:
                    break
                res_content.append(dialogue_list.pop(0))
            res_content_list.append('\n'.join(res_content))
        return res_content_list





