zting committed on
Commit 377f223
1 Parent(s): f15a46e

Delete langChain_test.py

Files changed (1)
  1. langChain_test.py +0 -131
langChain_test.py DELETED
@@ -1,131 +0,0 @@
- # -*- coding: utf-8 -*-
-
- from langchain.agents import initialize_agent
- from langchain.llms import OpenAI
- from langchain.chat_models import ChatOpenAI, AzureChatOpenAI
- from langchain.chains.conversation.memory import ConversationBufferWindowMemory
-
- import os
-
- #llm_fy = OpenAI(model_name="text-davinci-003", max_tokens=1024)  # used for translation; already replaced by LLMChain below
-
- OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
- OPENAI_API_BASE = os.getenv("OPENAI_API_BASE")
- #llm = ChatOpenAI(openai_api_key=OPENAI_API_KEY, temperature=0, model_name='gpt-3.5-turbo', openai_api_base=OPENAI_API_BASE)
- llm = AzureChatOpenAI(
-     deployment_name="bitservice_chat_35",
-     openai_api_base=OPENAI_API_BASE,
-     openai_api_key=OPENAI_API_KEY,
-     openai_api_version="2023-03-15-preview",
-     model_name="gpt-3.5-turbo",
- )
-
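- # BLIP image captioning: load the processor and model once at import time, on GPU when available.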
- import torch
- from transformers import BlipProcessor, BlipForConditionalGeneration
-
- image_to_text_model = "Salesforce/blip-image-captioning-large"
- device = 'cuda' if torch.cuda.is_available() else 'cpu'
-
- processor = BlipProcessor.from_pretrained(image_to_text_model)
- model = BlipForConditionalGeneration.from_pretrained(image_to_text_model).to(device)
-
- import requests
- from PIL import Image
-
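- # Caption helpers: open an image (from a URL or a local path) and return the BLIP-generated description.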
- def describeImageByUrl(image_url):
-     image_object = Image.open(requests.get(image_url, stream=True).raw).convert('RGB')
-     inputs = processor(image_object, return_tensors="pt").to(device)
-     outputs = model.generate(**inputs)
-     describe = processor.decode(outputs[0], skip_special_tokens=True)
-     return describe
-
- def describeImageByPath(image_path):
-     image_object = Image.open(image_path).convert('RGB')
-     inputs = processor(image_object, return_tensors="pt").to(device)
-     outputs = model.generate(**inputs)
-     describe = processor.decode(outputs[0], skip_special_tokens=True)
-     return describe
-
- #description = describeImageByUrl('https://img0.baidu.com/it/u=4190066402,1916608022&fm=253&fmt=auto&app=120&f=JPEG?w=1280&h=800')
- #description
-
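- # Expose the captioner to the agent as a LangChain tool.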
- from langchain.tools import BaseTool
-
- class DescribeImageTool(BaseTool):
-     name = "Describe Image Tool"
-     description = 'use this tool to describe an image.'
-
-     def _run(self, url: str):
-         #description = describeImageByUrl(url)
-         description = describeImageByPath(url)
-         return description
-
-     def _arun(self, query: str):
-         raise NotImplementedError("Async operation not supported yet")
-
- tools = [DescribeImageTool()]
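-
- # Conversational ReAct agent: uses the tool above, keeps a 5-turn chat window, and stops after 3 tool iterations.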
- agent = initialize_agent(
-     agent='chat-conversational-react-description',
-     tools=tools,
-     llm=llm,
-     verbose=True,
-     max_iterations=3,
-     early_stopping_method='generate',
-     memory=ConversationBufferWindowMemory(
-         memory_key='chat_history',
-         k=5,
-         return_messages=True
-     )
- )
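-
- # Translation helpers built on LLMChain (these replace the earlier llm_fy approach noted above).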
- from langchain.chains import LLMChain
- from langchain.prompts import PromptTemplate
-
- def enToChinese(english):
-     #ch = llm_fy("Please translate the following sentence from English to Chinese:"+english)
-     #return ch
-     pp = "Please translate the following sentence from English to Chinese:{english}"
-     prompt = PromptTemplate(input_variables=["english"], template=pp)
-     llchain = LLMChain(llm=llm, prompt=prompt)
-     return llchain.run(english)
-
- def chToEnglish(chinese):
-     #en = llm_fy("Please translate the following sentence from Chinese to English:"+chinese)
-     #return en
-     pp = "Please translate the following sentence from Chinese to English:{chinese}"
-     prompt = PromptTemplate(input_variables=["chinese"], template=pp)
-     llchain = LLMChain(llm=llm, prompt=prompt)
-     return llchain.run(chinese)
-
- #image_url = 'https://img0.baidu.com/it/u=4190066402,1916608022&fm=253&fmt=auto&app=120&f=JPEG?w=1280&h=800'
- #agent(f"Describe the following image:\n{image_url}")
- #en_result = agent(f"描述下面这张图片:\n{image_url}")['output']  # "Describe the following image"
- #print(enToChinese(en_result))
-
- #agent(f"What is the brand of car in the following image:\n{image_url}")
- #en_result = agent(f"下面这张图片的汽车品牌是什么:\n{image_url}")['output']  # "What is the brand of car in the following image"
- #print(enToChinese(en_result))
-
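- # End-to-end entry point: ask the agent a question about an image, then translate the English answer to Chinese.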
- def imageAnalyse(image_path, question):
-     question = question.strip()
-     if len(question) == 0:
-         question = "请描述这张图片"  # "Please describe this image"
-     print("question:" + question)
-     en_result = agent(f"{question}:\n{image_path}")['output']
-     print("en_result:" + en_result)
-     ch_result = enToChinese(en_result)
-     print("ch_result:" + ch_result)
-     return ch_result
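
For reference, a minimal sketch of how the deleted module was driven (the image path below is hypothetical):

    from langChain_test import imageAnalyse
    # An empty question falls back to "请描述这张图片" ("Please describe this image").
    print(imageAnalyse("/path/to/photo.jpg", ""))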