import os

import langchain
import torch
from langchain import LLMChain, PromptTemplate
from langchain.chains import LLMChain
from langchain.llms import (ChatGLM, HuggingFaceHub, HuggingFacePipeline,
                            TextGen, Tongyi)
from langchain.prompts import PromptTemplate
from transformers import (AutoModelForCausalLM, AutoModelForSeq2SeqLM,
                          AutoTokenizer, TextStreamer, pipeline)


class langchaindemo:
    """Demos that wire local and hosted LLMs into LangChain.

    NOTE(review): the lowercase class name violates PEP 8 (PascalCase)
    but is kept for backward compatibility with existing callers.
    """

    def local_loader(self):
        """Load a local LLM and expose it to LangChain via HuggingFacePipeline."""
        # Tuple, not a set: the original set literal made the
        # ``localmodels[0]`` access below raise TypeError (sets are
        # unsubscriptable and unordered).
        localmodels = (r'E:\llama\text-generation-webui\models\chatglm2-6b',
                       r'E:\llama\text-generation-webui\models\Baichuan2-7B-Base',
                       r'E:\llama\text-generation-webui\models\Baichuan2-7B-Chat',
                       r'E:\llama\text-generation-webui\models\THUDM_chatglm-6b')
        modeid = localmodels[0]

        tokenizer = AutoTokenizer.from_pretrained(
            modeid, use_fast=False, trust_remote_code=True)
        model = AutoModelForCausalLM.from_pretrained(
            modeid, trust_remote_code=True, device_map="auto",
            torch_dtype=torch.bfloat16)

        # Decoder-only (causal) models run under the "text-generation"
        # task; "text2text-generation" is the encoder-decoder task and
        # does not match an AutoModelForCausalLM.
        pipe = pipeline(
            task="text-generation",
            model=model,
            tokenizer=tokenizer,
            max_length=100,
            # device=0
        )

        local_llm = HuggingFacePipeline(pipeline=pipe)
        print(local_llm('What is the capital of France? '))
        print(local_llm('中国曾经的首都在哪里'))

    def _make_pipeline(self, modeid, auto_cls, taskid, **model_kwargs):
        """Build a transformers pipeline for *modeid*.

        Shared helper for t5/facebook/gpt2 below. *auto_cls* is the
        Auto model class to instantiate and *model_kwargs* are
        forwarded to ``from_pretrained``.
        """
        tokenizer = AutoTokenizer.from_pretrained(modeid)
        model = auto_cls.from_pretrained(modeid, **model_kwargs)
        return pipeline(
            task=taskid,
            model=model,
            tokenizer=tokenizer,
            max_length=100,
            # device=0
        )

    def t5(self):
        """Auto-download google/flan-t5-large from huggingface.co; return a pipeline."""
        return self._make_pipeline(
            'google/flan-t5-large', AutoModelForSeq2SeqLM,
            "text2text-generation",
            trust_remote_code=True, device_map="auto")

    def facebook(self):
        """Auto-download facebook/blenderbot-1B-distill from huggingface.co; return a pipeline."""
        return self._make_pipeline(
            'facebook/blenderbot-1B-distill', AutoModelForSeq2SeqLM,
            "text2text-generation",
            trust_remote_code=True)

    def gpt2(self):
        """Auto-download gpt2-medium from huggingface.co; return a pipeline."""
        return self._make_pipeline(
            'gpt2-medium', AutoModelForCausalLM,
            "text-generation",
            trust_remote_code=True, device_map="auto")

    def linksoul(self):
        """Stream a chat completion from LinkSoul/Chinese-Llama-2-7b on GPU."""
        model_path = "LinkSoul/Chinese-Llama-2-7b"

        tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
        model = AutoModelForCausalLM.from_pretrained(model_path).half().cuda()
        # Streamer prints tokens to stdout as they are generated.
        streamer = TextStreamer(
            tokenizer, skip_prompt=True, skip_special_tokens=True)

        instruction = """[INST] <<SYS>>\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe.  Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.

                If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\n<</SYS>>\n\n{} [/INST]"""

        prompt = instruction.format("用英文回答，什么是夫妻肺片？")
        generate_ids = model.generate(
            tokenizer(prompt, return_tensors='pt').input_ids.cuda(),
            max_new_tokens=4096, streamer=streamer)
        print(tokenizer.decode(generate_ids[0]))

    def _run_chain(self, pipe, question):
        """Wrap *pipe* in an LLMChain; print the direct and chained answers.

        Shared helper for the three test_* methods below.
        """
        local_llm = HuggingFacePipeline(pipeline=pipe)
        print(local_llm(question))

        template = """Question: {question}
        Answer: Let's think step by step."""

        prompt = PromptTemplate(
            template=template, input_variables=["question"])
        llm_chain = LLMChain(prompt=prompt, llm=local_llm)
        print(llm_chain.run(question))

    def test_t5(self):
        """Exercise the flan-t5 pipeline through LangChain."""
        self._run_chain(self.t5(), "What is the capital of France? ")

    def test_gpt2(self):
        """Exercise the gpt2-medium pipeline through LangChain."""
        self._run_chain(self.gpt2(), "What is the capital of France? ")

    def test_facebook(self):
        """Exercise the blenderbot pipeline through LangChain."""
        self._run_chain(
            self.facebook(), "What area is best for growing wine in France?")

    def huggingface_call(self):
        """Call a model hosted on the Hugging Face Hub directly."""
        # SECURITY(review): hardcoded API token committed to source —
        # rotate this credential and load it from the environment.
        hf_token = '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'
        os.environ['HUGGINGFACEHUB_API_TOKEN'] = hf_token

        # modeid = 'gpt2'  # alternative hub model
        modeid = 'google/flan-t5-large'

        template = """Question: {question}

        Answer: Let's think step by step."""

        prompt = PromptTemplate(
            template=template, input_variables=["question"])

        Question = "What is the capital of France? "

        llm_chain = LLMChain(prompt=prompt, llm=HuggingFaceHub(
            repo_id=modeid, huggingfacehub_api_token=hf_token))
        print(llm_chain.run(Question))

    def api_text_generation_webui(self):
        """Connect LangChain to a text-generation-webui API server.

        Start the server first with: ``python server.py --api --listen``.
        """
        model_url = "http://127.0.0.1:5000"
        langchain.debug = True

        template = """Question: {question}

        Answer: Let's think step by step."""

        prompt = PromptTemplate(
            template=template, input_variables=["question"])
        llm = TextGen(model_url=model_url)
        llm_chain = LLMChain(prompt=prompt, llm=llm)

        # question = "if x^2=16, then what is the value of x-2?"  # alternative
        question = "陕西省省会城市在哪里 "

        # llm_chain.run(question)
        print(llm(question))

    def api_chatglm(self):
        """Chat via ChatGLM's bundled API server (start with ``python api.py``)."""
        model_url = "http://127.0.0.1:8000"
        langchain.debug = True

        template = """{question}"""

        prompt = PromptTemplate(
            template=template, input_variables=["question"])
        llm = ChatGLM(endpoint_url=model_url, max_token=80000, top_p=0.9,
                      temperature=0.1,
                      history=[['i am a student', 'i am in xian']],
                      model_kwargs={"sample_mode_args": False})
        llm_chain = LLMChain(prompt=prompt, llm=llm)

        # question = "if x^2=16, then what is the value of x-2?"  # alternative
        question = "陕西省省会城市在哪里 "

        llm_chain.run(question)

    def api_tongyiqwen(self):
        """Query Alibaba's Tongyi Qianwen (DashScope) API; this one works."""
        template = """Question: {question}
        Answer: Let's think step by step."""

        # SECURITY(review): hardcoded API key committed to source —
        # rotate this credential and read it from the environment.
        key = 'sk-af7d53a016f744e586432e291685df54'
        os.environ["DASHSCOPE_API_KEY"] = key
        prompt = PromptTemplate(
            template=template, input_variables=["question"])
        llm = Tongyi()
        llm_chain = LLMChain(prompt=prompt, llm=llm)
        question = "What NFL team won the Super Bowl in the year Justin Beiber was born?"
        print(llm_chain.run(question))


def test_api_chatglm():
    """Smoke-test the ChatGLM HTTP endpoint demo."""
    demo = langchaindemo()
    demo.api_chatglm()

def test_api_tongyi():
    """Smoke-test the Tongyi Qianwen API demo."""
    demo = langchaindemo()
    demo.api_tongyiqwen()

# Guard the demo call so importing this module does not trigger a
# network request as a side effect.
if __name__ == "__main__":
    test_api_tongyi()