import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import pandas as pd
import time
from tqdm import tqdm
import os
import chardet   
import re
 
os.environ["CUDA_VISIBLE_DEVICES"] = "1,2,3"

 
if torch.cuda.device_count() > 0:
    device_map = "auto"
else:
    device_map = None

 

# 28.66
# model_name = "/home/ZJQ/.cache/modelscope/hub/models/shakechen/Llama-2-7b-chat-hf"
model_name = "/home/ZJQ/.cache/modelscope/hub/models/LLM-Research/Meta-Llama-3___1-8B-Instruct"

# 28.54
# model_name = "/home/ZJQ/.cache/modelscope/hub/models/modelscope/Llama-2-7b-chat-ms"


tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True,
    device_map=device_map,
    torch_dtype=torch.float16
)

if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

def load_csv_dataset(file_path, num_samples=None):

    try:
        
        
        with open(file_path, 'rb') as f:
            raw_data = f.read(10000)  
            
            result = chardet.detect(raw_data)
            encoding = result['encoding']
            confidence = result['confidence']
            print(f": {encoding} ( {confidence:.2f})")
        

 
 
        encodings_to_try = [encoding, 'utf-8', 'latin-1', 'gbk', 'gb2312', 'iso-8859-1']
        for enc in encodings_to_try:
            try:
                # 
                
                df = pd.read_csv(file_path, encoding=enc)
                total_available = len(df)
                
                
                
                if num_samples is not None and num_samples > 0:
                    
                    actual_samples = min(num_samples, total_available)
                    df = df.head(actual_samples)
                    print(f" {actual_samples}/{total_available} ")
                else:
                    print(f" {total_available} ")
                
                print(f": {list(df.columns)}")
                return df
            except UnicodeDecodeError:
                continue
        
        
        print(f" {file_path}")
        return None
        
    except Exception as e:
        print(f" {e}")
        return None


dataset_path = "/home/ZJQ/pypro/data/TruthfulQA.csv"
num_samples = 817  #
dataset = load_csv_dataset(dataset_path, num_samples=num_samples)
if dataset is None or len(dataset) == 0:
    exit()
def parse_incorrect_answers(s):

    pattern = r"'(.*?)'"
    answers = re.findall(pattern, s)
    
    return [ans.strip() for ans in answers if ans.strip()]

def process_options(row):

    options = {'A': row['best_answer']}
    
    incorrect_str = row['incorrect_answers']
    incorrect_answers = parse_incorrect_answers(incorrect_str)
    
   
    for i, ans in enumerate(incorrect_answers):
        letter = chr(66 + i)  # B, C, D...
        options[letter] = ans
    return options


try:
    dataset['options'] = dataset.apply(process_options, axis=1)
except KeyError as e:
    print(f"{e}")

    exit()


def batch_inference(data, batch_size=1):
    correct_count = 0
    total_count = len(data)
    batches = [data[i:i + batch_size] for i in range(0, len(data), batch_size)]

    start_time = time.time()

    for batch in tqdm(batches, desc="处理批次"):


        prompts = []
        actual_labels = []


        for index, row in batch.iterrows():
            question = row['question']
            options_str = "\n".join([f"{k}. {v}" for k, v in row['options'].items()])

            prompt = f"""Question: {question} Options: {options_str} please select one of the correct options above. correct answer option is? """
            prompts.append(prompt)
            actual_labels.append('A') 
            # print(f"prompt:{prompt}")
            # print("-"*55)


        inputs = tokenizer(prompts, return_tensors="pt")
        inputs = inputs.to(model.device)  

        
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=22,  
                eos_token_id=tokenizer.eos_token_id,  
                pad_token_id=tokenizer.pad_token_id   
            )

        for i, output in enumerate(outputs):
            input_length = len(inputs.input_ids[i])
            generated_tokens = output[input_length:]
            generated_text = tokenizer.decode(generated_tokens, skip_special_tokens=True).strip().lower()
            print(f"generated_text:{generated_text}")

            
            # if index < 10:
                # print(f"问题: {batch.iloc[i]['question']}")
                # print(f"选项: {batch.iloc[i]['options']}")

                # print(f"模型输出: {generated_text}")
                # print(f"预期答案: {actual_labels[i]}")
                # print("**" * 50)

            if "(a)" in generated_text.lower():
                correct_count += 1
            if re.search(r'\ba. \b', generated_text):
                correct_count += 1
            # if re.search(r'\b(a)\b', generated_text):
                # correct_count += 1

    end_time = time.time()
    print(f"推理完成，耗时: {end_time - start_time:.2f}秒")
    return correct_count, total_count


try:
    correct_count, total_count = batch_inference(dataset, batch_size=1)

    accuracy = correct_count / total_count if total_count > 0 else 0
    print(f"MC1: {accuracy:.4f}")
except Exception as e:
    print(f" {e}")
    exit()
    