import os 
import pandas as pd
import torch 
import warnings
# Globally silence warnings (HF transformers/datasets emit noisy deprecation
# messages during pipeline inference). NOTE(review): this also hides warnings
# from user code — consider narrowing with a category filter.
warnings.filterwarnings('ignore')
from accelerate import PartialState
from datasets import load_dataset
from pathlib import Path 
from peft import PeftModel
from transformers import pipeline
from trl import ModelConfig

from model.handle_model import get_model 
from prompts.prompt import Prompt


def formatting_prompts_func(examples):
    """Turn a batch of raw questions into prompt strings.

    For each entry in ``examples["input"]``, strips the boilerplate
    system phrase and every space character, then substitutes the
    cleaned question into ``Prompt.q_prompt``.

    Returns a dict with a single ``"text"`` column, as expected by
    ``datasets.Dataset.map(..., batched=True)``.
    """
    boilerplate = '您是一位电池研究与性能分析专家。'
    texts = [
        Prompt.q_prompt.format(
            question=raw.replace(boilerplate, '').replace(' ', ''),
        )
        for raw in examples["input"]
    ]
    return {"text": texts}


def pipe_infer():
    """Run batched LoRA-adapter inference over a 40-example dataset slice.

    Loads examples [80:120) of the training jsonl files, formats them into
    prompts via ``formatting_prompts_func``, generates answers with a
    PEFT-wrapped causal LM, and — on rank 0 (or in a non-distributed run) —
    writes question/prediction pairs to a CSV file.
    """
    # Only the 'train' pattern is consumed below; the 'validation' entry is
    # kept for reference when switching the evaluated split.
    data_files = {
        'train': 'train/*lqy1.jsonl',
        'validation': 'eval/*lqy1.jsonl'
    }
    data_path = '/gemini/data-2'
    # Slice [80:120] -> 40 evaluation examples from the training files.
    data = load_dataset(data_path, data_files=data_files['train'], split='train[80:120]')
    data = data.map(formatting_prompts_func, batched=True)

    # Base model checkpoint; the LoRA adapter is attached further down.
    model_name = Path('/gemini/data-1/DeepSeek-R1-Distill-Qwen-32B')
    model_args = ModelConfig(
        model_name_or_path=model_name,
        torch_dtype=torch.bfloat16,
        attn_implementation=None,  # set to "flash_attention_2" if available
    )
    model, tokenizer = get_model(model_args)
    model.eval()

    peft_model_path = '/gemini/data-1/output_batllm-Qwen-32B/peft_model'

    generator = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer
    )

    # Swap the pipeline's model for the LoRA-adapted one; the tokenizer is
    # shared between the base model and the adapter.
    generator.model = PeftModel.from_pretrained(
        generator.model,
        peft_model_path,
        inference_mode=True
    )

    # Batched generation over every formatted prompt at once.
    output = generator(
        data['text'],
        max_new_tokens=1800,
        temperature=1e-1,  # low temperature: near-deterministic sampling
        do_sample=True,
        top_k=3,           # restrict sampling to the 3 most likely tokens
        eos_token_id=tokenizer.eos_token_id  # stop at end-of-sequence
    )

    # Only rank 0 (or a non-distributed run) writes results to disk, to
    # avoid duplicate CSVs under torch.distributed.
    if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
        res = {
            'question': list(data['input']),
            # Each pipeline result is a list of candidates; take the first.
            'prediction': [item[0]['generated_text'] for item in output],
        }
        print(res['prediction'])
        df = pd.DataFrame(res)
        # NOTE(review): filename keeps the original spelling ('precition')
        # so any downstream consumer expecting this path keeps working.
        df.to_csv('precition_100_100_lqy.csv', index=False)


# Script entry point: run inference only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    pipe_infer()