
import os
import sys

from langchain_openai import ChatOpenAI

# SECURITY: API keys must never be hard-coded in source. The keys previously
# committed here are leaked and must be revoked. Read the key from the
# standard OPENAI_API_KEY environment variable instead.
open_api_key = os.environ.get("OPENAI_API_KEY", "")

# temperature=0 for deterministic model behavior.
llm = ChatOpenAI(temperature=0, openai_api_key=open_api_key)

# Transcript file to analyze. The original hard-coded path is kept as the
# default, but it can now be overridden via the first command-line argument.
text_path = sys.argv[1] if len(sys.argv) > 1 else "F:/tmp/讲稿订单优化.txt"
with open(text_path, "r", encoding="utf-8") as file:
    text = file.read()

# Report how many tokens the model counts for the text, then the raw
# token ids themselves.
num_tokens = llm.get_num_tokens(text)
print(num_tokens)
print(llm.get_token_ids(text))
