"""Generate philosophical Japanese example sentences for a kanji vocabulary list.

Builds a single prompt (one worked example + instructions + vocabulary list)
and sends it to a locally-run Llama-3 8B model via GPT4All, printing the
model's reply to stdout.
"""
import os

# Route all HTTP(S) traffic (model download from huggingface/gpt4all) through
# a local proxy. Set BEFORE importing gpt4all so any HTTP machinery the
# library configures at import time already sees the proxy settings.
os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:7890'
os.environ['HTTP_PROXY'] = 'http://127.0.0.1:7890'

from gpt4all import GPT4All

# NOTE(review): failures previously observed on this machine, kept for reference:
#   Failed to load llamamodel-mainline-cuda-avxonly.dll: LoadLibraryExW failed with error 0x7e
#   Failed to load llamamodel-mainline-cuda.dll: LoadLibraryExW failed with error 0x7e
#   MaxRetryError / ProxyError(SSLEOFError) while requesting
#   https://huggingface.co/Helsinki-NLP/opus-mt-en-zh/resolve/main/tokenizer_config.json
#   (Retrying in 1s [Retry 1/5]) — hence the proxy settings above.

# One worked example showing the desired output format:
# Japanese sentence on one line, Chinese translation on the next.
example = '''
剛は心身の強さを象徴する性質だ
刚是象征身心强健的特质.  '''

# Instruction fragments (Chinese): "follow the format above" / task description.
tips = ",按照上面格式:"
pre = "每个单词造2个带有哲理性的日语句子，并且备注上翻译:"

# Vocabulary list to process: kanji → kana reading → Chinese gloss.
suf = '''
榊 → さかき → 榊树（神道供奉植物）
檜 → ひのき → 桧木 / 日本扁柏
檻 → おり → 笼子
櫛 → くし → 梳子
湯 → ゆ → 热水 / 温泉 / 汤
滝 → たき → 瀑布
炎 → ほのお → 火焰
'''

# NOTE(review): `end` is defined but never sent — `end2` (empty) is used in the
# prompt below. Presumably a manual toggle; confirm which suffix is intended.
end = "并且备注上读音，请简要输出"
end2 = ""

model = GPT4All("Meta-Llama-3-8B-Instruct.Q4_0.gguf")  # downloads / loads a 4.66GB LLM
with model.chat_session():
    print(model.generate(example + tips + pre + suf + end2, max_tokens=1024))