import os
import logging
# from bot.insurance_sales_gpt.sales_gpt import SalesGPT
from bot.insurance_sales_gpt.demo.sales_gpt import  SalesGPT
from langchain.chat_models import ChatOpenAI
from langchain.llms.openai import OpenAI, OpenAIChat
from langchain.llms.base import LLM
from typing import Optional, List, Mapping, Any
import requests
from bot.bot import Bot
from bot.insurance_sales_gpt.demo.cache_dict import  cache_dict

# NOTE(security): a real OpenAI API key appears to be committed here — it should be
# revoked and loaded from the environment / a secrets manager, never hard-coded.
# setdefault keeps an externally-provided key from being clobbered at import time.
os.environ.setdefault('OPENAI_API_KEY', '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')  # fill me in
class CustomLLM(LLM):
    """LangChain LLM wrapper that delegates text generation to a remote HTTP
    service hosting a fine-tuned Bloom model.
    """

    # Identifier of the remote model (a checkpoint path on the serving host).
    model_name = "/mydata2/zengwenjia/project/bloom-1.7b_extract_train"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Send *prompt* to the remote model endpoint and return the completion.

        Args:
            prompt: The user prompt to complete.
            stop: Accepted for LLM-interface compatibility; not forwarded to
                the remote service.

        Returns:
            The generated text with the trailing ``</s>`` end-of-sequence
            marker removed.

        Raises:
            requests.RequestException: If the HTTP call fails or times out.
        """
        params = {
            "instruction": "你是一个具备广博知识的智能个人助理",
            "input": prompt,
        }
        # Fetch the completion over HTTP; the timeout prevents this call from
        # hanging forever if the model server is unreachable.
        response = requests.get(
            "http://180.184.75.50:7070/get_prompt_response",
            params=params,
            timeout=60,
        ).text
        # Strip the model's end-of-sequence token before returning.
        return response.replace("</s>", "")

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Parameters that uniquely identify this LLM for LangChain caching."""
        return {"name_of_model": self.model_name}

    @property
    def _llm_type(self) -> str:
        """LLM type tag reported to LangChain."""
        return "custom"
# llm=CustomLLM()





class SaleGPT(Bot):
    """Bot adapter that routes user queries through a SalesGPT conversation agent."""

    def __init__(self, verbose=True, **kwargs):
        """Build the underlying chat model and the sales agent.

        Args:
            verbose: When True, print each incoming user query.
            **kwargs: Forwarded unchanged to the ``Bot`` base class.
        """
        super().__init__(**kwargs)
        self.llm = OpenAIChat(temperature=0, model_name="gpt-3.5-turbo")
        self.verbose = verbose
        # Honor the caller's verbosity instead of hard-coding verbose=True.
        self.sales_agent = SalesGPT.from_llm(self.llm, verbose=verbose)

    def reply(self, query, session_id=None):
        """Return the sales agent's response to *query* for the given session."""
        if self.verbose:
            print(f"User input: {query}")
        return self.sales_agent.step(session_id, query)

    def get_user_info(self, session_id=None):
        """Return the cached user-info mapping for *session_id*.

        Looks up the per-session entry in ``cache_dict`` and reads its
        ``"user_info"`` key. Any lookup failure (missing session, oddly-shaped
        cache entry) degrades to an empty dict instead of raising, so the
        return type is consistently a mapping.
        """
        try:
            # Default to {} (not '') so the chained .get works when the
            # session has no cache entry yet.
            session_cache = cache_dict.get(session_id, {})
            user_info = session_cache.get("user_info", {})
        except Exception:
            # Best-effort lookup: never let cache problems propagate.
            user_info = {}
        # Lazy %-style args avoid formatting cost when INFO logging is disabled.
        logging.info('session_id:%s,get user_info:%s', session_id, user_info)
        return user_info



    
    

