# Minimal demo: run text generation on google/gemma-2b via the transformers pipeline.
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline  # used by the disabled example further down
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import ChatGLM  # NOTE(review): unused in the live code — confirm it is needed, otherwise remove

# Use a pipeline as a high-level helper.
# NOTE: downloads the model weights on first run; add device=0 to run on GPU.
pipe = pipeline("text-generation", model="google/gemma-2b")
print(pipe("What is the capital of China?"))

# hf = HuggingFacePipeline.from_model_id(
#     model_id="google/gemma-2b",
#     task="text-generation",
#     device=0,
#     pipeline_kwargs={"max_new_tokens": 500},
# )


# from langchain.prompts import PromptTemplate

# # template = """Question: {question}


# template = """<start_of_turn>user
# {question}<end_of_turn>
# <start_of_turn>model
# """

# prompt_template = PromptTemplate(template=template, input_variables=["question"])

# end = "<end_of_turn>\n"
# prompt = """<start_of_turn>system
# You are a very helpful assistant that can answer user questions in a few short, creative sentences;
# if you don't know the answer, just say you don't know.<end_of_turn>\n"""

# question = "What is the capital of France?"
# prompt += prompt_template.format(question=question)
# a = hf.invoke(prompt)
# print(a)

