import csv
import os
import datetime
import torch
from tqdm import tqdm
import torch.nn.functional as F

from transformers import AutoTokenizer,  AutoModelForCausalLM, LlamaTokenizer, T5Tokenizer,BartTokenizer
from sklearn.feature_extraction.text import TfidfVectorizer
from transformers import GPT2Model

from ppl_utils import load_probability,calculate_perplexity,calculate_perplexity_fast,write_to_csv


# Timestamp baked into the output filename so each run writes a fresh CSV.
cur_time = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")

# Directory containing this script; all data/model paths below are
# resolved relative to it.
project_dir = os.path.dirname(os.path.abspath(__file__))

# Input dataset: one text per row, with a 'text' column and (usually) a
# 'label'/'generated' column. The alternate dataset is kept for reference.
# data_path = os.path.join(project_dir, 'data/train_essays.csv')
data_path = os.path.join(project_dir,'data/train_v2_drcat_02.csv')

# Pre-load the token-probability tables used by calculate_perplexity_fast
# (presumably cached under project_dir — see ppl_utils).
load_probability(project_dir)
# NOTE(review): hard-coded to GPU 0; fails on CPU-only hosts.
device = torch.device('cuda:0')
# Each entry of model_tokenizers is one of two shapes:
#   (name, tokenizer, model)       -> perplexity from a live causal LM
#   (name, tokenizer, vocab_size)  -> fast table-based perplexity
# The first (live-model) entry is special-cased; the rest are built from a
# compact spec table of (display name, tokenizer class, relative path, vocab).
model_tokenizers = [
    (
        'GPT-2-Pretrain',
        AutoTokenizer.from_pretrained('gpt2'),
        AutoModelForCausalLM.from_pretrained(
            '/home/llm_user/index/llm-detection/pretrained_models/stage1_output_model'
        ).half().to(device),
    ),
]

_tokenizer_specs = [
    ('gpt2',       AutoTokenizer,  'llm/gpt2',                            50265),
    ('opt',        AutoTokenizer,  'llm/facebook/opt-1.3b',               50257),
    ('unilm',      AutoTokenizer,  'llm/microsoft/unilm-base-cased',      28996),
    ('llama',      LlamaTokenizer, 'llm/decapoda-research/llama-7b-hf',   32000),
    ('bart',       BartTokenizer,  'llm/facebook/bart-base',              50265),
    ('t5',         T5Tokenizer,    'llm/google/flan-t5-base',             32128),
    ('bloom',      AutoTokenizer,  'llm/bigscience/bloom-560m',          250880),
    ('neo',        AutoTokenizer,  'llm/EleutherAI/gpt-neo-2.7B',         50257),
    ('vicuna',     LlamaTokenizer, 'llm/lmsys/vicuna-7b-delta-v1.1',      32000),
    ('gpt2_large', AutoTokenizer,  'llm/gpt2-large',                      50265),
    ('opt_3b',     AutoTokenizer,  'llm/facebook/opt-2.7b',               50257),
]

model_tokenizers += [
    (name, tok_cls.from_pretrained(os.path.join(project_dir, rel_path)), vocab)
    for name, tok_cls, rel_path, vocab in _tokenizer_specs
]

# NOTE(review): these counters are never incremented in this script; kept
# for compatibility in case another module reads them.
error_count1 = 0
error_count2 = 0
all_count = 0

# Output CSV. The timestamp makes the name unique per run, so the resume
# branch below only triggers when re-running against this exact filename.
filename = os.path.join(project_dir, f'data/train_v2_drcat_02_macro_features_{cur_time}.csv')

# Header: the label column, four statistics from the live GPT-2 model
# (first model_tokenizers entry), then one column per fast tokenizer.
score_header = ["label",'gpt2-mean','gpt2-std','gpt2-max','gpt2-min'] + [i[0] for i in model_tokenizers[1:]]

cur_texts = []
cur_labels = []
start_row = 0

# BUGFIX: count already-written rows BEFORE writing the header, and write the
# header only when starting a fresh file. The original wrote the header
# unconditionally first, which (a) created the file so start_row was always 0
# (the resume logic was dead) and (b) would append a duplicate header when
# resuming into an existing file.
if os.path.exists(filename):
    with open(filename,'r',encoding='utf-8') as f:
        lines = csv.reader(f)
        next(lines)  # skip header row
        for line in lines:
            start_row += 1
else:
    write_to_csv([score_header], filename)

# Main loop: for every LLM-generated text in the dataset, compute one
# perplexity feature per entry of model_tokenizers and append the rows to
# the output CSV in batches of 100.
with open(data_path,'r',encoding='utf-8') as f:
    reader = csv.reader(f)
    header = next(reader)
    text_idx = header.index('text')
    # The label column name differs across dataset versions; fall back to a
    # constant '0' label when neither name is present.
    if 'label' in header:
        label_idx = header.index('label')
    elif 'generated' in header:
        label_idx = header.index('generated')
    else:
        label_idx = None

    data = []
    for i, line in tqdm(enumerate(reader)):
        if i < start_row:
            continue  # resume: skip rows already written in a previous run
        all_count += 1
        ppls = []
        if label_idx is not None:
            new_line = [line[label_idx]]
        else:
            new_line = ['0']
        # Only score texts labelled '1' (LLM-generated).
        # BUGFIX: the original indexed line[label_idx] unconditionally here,
        # raising TypeError whenever the dataset has no label column.
        if label_idx is not None and line[label_idx] == '0':
            continue

        for model_name, tokenizer, model_or_vocab_size in model_tokenizers:
            if isinstance(model_or_vocab_size, int):
                # Fast path: table-based perplexity, tokenizer only.
                vocab_size = model_or_vocab_size
                if 'gpt2' in model_name:
                    # GPT-2 tokenizers enforce the model max length here.
                    tokens = tokenizer.tokenize(line[text_idx], truncation=True)
                else:
                    tokens = tokenizer.tokenize(line[text_idx])
                token_ids = [tokenizer.convert_tokens_to_ids(tokens)]
                ppl = calculate_perplexity_fast(
                    token_ids,
                    model_name,
                    vocab_size
                )[0]
                ppls.append(ppl)
            else:
                # Live model: full forward pass; returns several statistics
                # (mean/std/max/min — matches the header columns).
                model = model_or_vocab_size
                ppl = calculate_perplexity(
                    line[text_idx],
                    model=model,
                    tokenizer=tokenizer,
                    device=device
                )
                ppls.extend(ppl)
        new_line.extend(ppls)
        data.append(new_line)
        # Flush to disk every 100 rows.
        # BUGFIX: pass the output filename explicitly, matching the header
        # write earlier in the script (the original omitted it).
        if len(data) >= 100:
            write_to_csv(data, filename)
            data.clear()

    # Flush any remaining rows.
    if data:
        write_to_csv(data, filename)
        data.clear()