linpershey committed
Commit 60274d1
1 Parent(s): 4a96867

add sonnet support

Files changed (4):
  1. app.py +4 -3
  2. model.py +130 -0
  3. sheet.py +103 -127
  4. utils.py +9 -0
app.py CHANGED
@@ -75,13 +75,14 @@ def do( business_id, business_name, address):
     extracted_results = extract_results( crawled_results, classes=classes)
     # logger.error(extracted_results['extracted_results'].columns)
     extracted_results = extracted_results['extracted_results'][ [ 'business_id', 'business_name', 'address', 'category', 'evidence', 'phone_number', 'description', 'store_name'] ]
-
+    logger.debug( extracted_results['category'])
+    print(extracted_results['category'])
     postprocessed_results = postprocess_result( extracted_results, postprocessed_results_path="/tmp/postprocessed_results.joblib", category_hierarchy=category2supercategory)
     os.remove("/tmp/postprocessed_results.joblib")
 
     formatted_results = format_output( postprocessed_results)
-    # logger.error( formatted_results.columns)
-
+    logger.debug( formatted_results)
+    print(formatted_results)
     formatted_output = format_category( formatted_results)
 
     img = plot_wordcloud(formatted_results['formatted_evidence'].values[0])
model.py ADDED
@@ -0,0 +1,130 @@
+import os
+import json
+import argparse
+
+from dotenv import load_dotenv
+import anthropic
+from openai import OpenAI
+
+from utils import parse_json_garbage
+
+load_dotenv()
+
+def llm( provider, model, system_prompt, user_content):
+    """Invoke LLM service
+    Argument
+    --------
+    provider: str
+        openai or anthropic
+    model: str
+        Model name for the API
+    system_prompt: str
+        System prompt for the API
+    user_content: str
+        User prompt for the API
+    Return
+    ------
+    response: str
+    """
+    if provider == 'openai':
+        client = OpenAI( organization = os.getenv('ORGANIZATION_ID'))
+        chat_completion = client.chat.completions.create(
+            messages=[
+                {
+                    "role": "system",
+                    "content": system_prompt
+                },
+                {
+                    "role": "user",
+                    "content": user_content,
+                }
+            ],
+            model = model,
+            response_format = {"type": "json_object"},
+            temperature = 0,
+            # stream = True
+        )
+        response = chat_completion.choices[0].message.content
+
+    elif provider == 'anthropic':
+        client = anthropic.Client(api_key=os.getenv('ANTHROPIC_APIKEY'))
+        response = client.messages.create(
+            model = model,
+            system = system_prompt,
+            messages=[
+                {"role": "user", "content": user_content}  # <-- user prompt
+            ],
+            max_tokens = 1024
+        )
+        response = response.content[0].text
+    else:
+        raise Exception("Invalid provider")
+
+    return response
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--provider", type=str, default='anthropic', help="openai or anthropic")
+    parser.add_argument("--model", type=str, default='claude-3-sonnet-20240229', help="Model name for the API",
+                        choices = ["claude-3-sonnet-20240229", "claude-3-haiku-20240307", "gpt-3.5-turbo-0125", "gpt-4-0125-preview"])
+    parser.add_argument("--classes", type=str, nargs='+', default=['小吃店', '日式料理(含居酒屋,串燒)', '火(鍋/爐)', '東南亞料理(不含日韓)', '海鮮熱炒', '特色餐廳(含雞、鵝、牛、羊肉)', '傳統餐廳', '燒烤', '韓式料理(含火鍋,烤肉)', '西餐廳(含美式,義式,墨式)', '西餐廳(餐酒館、酒吧、飛鏢吧、pub、lounge bar)', '西餐廳(土耳其、漢堡、薯條、法式、歐式、印度)', '早餐'])
+    parser.add_argument("--task", type=str, default='extract', choices=['extract', 'classify'])
+    args = parser.parse_args()
+
+
+
+    classes = ['小吃店', '日式料理(含居酒屋,串燒)', '火(鍋/爐)', '東南亞料理(不含日韓)', '海鮮熱炒', '特色餐廳(含雞、鵝、牛、羊肉)', '傳統餐廳', '燒烤', '韓式料理(含火鍋,烤肉)', '西餐廳(含美式,義式,墨式)', ]
+    backup_classes = [ '中式', '西式']
+
+    extraction_prompt = '''
+    As a helpful and rigorous retail analyst, given the provided query and a list of search results for the query,
+    your task is to first identify relevant information of the identical store based on store name and proximity of address if known. After that, extract `store_name`, `address`, `description`, `category` and `phone_number` from the found relevant information, where `category` can only be `小吃店`, `日式料理(含居酒屋,串燒)`, `火(鍋/爐)`, `東南亞料理(不含日韓)`, `海鮮熱炒`, `特色餐廳(含雞、鵝、牛、羊肉)`, `傳統餐廳`, `燒烤`, `韓式料理(含火鍋,烤肉)`, `西餐廳(含美式,義式,墨式)`, `西餐廳(餐酒館、酒吧、飛鏢吧、pub、lounge bar)`, `西餐廳(土耳其、漢堡、薯條、法式、歐式、印度)` or `早餐`.
+    It's very important to omit unrelated results. Do not make up any assumption.
+    Please think step by step, and output in json format. An example output json is like {"store_name": "...", "address": "...", "description": "... products, service or highlights ...", "category": "...", "phone_number": "..."}
+    If no relevant information has been found, simply output json with empty values.
+    I'll tip you and guarantee a place in heaven if you do a great job completely according to my instruction.
+    '''
+    classification_prompt = f"""
+    As a helpful and rigorous retail analyst, given the provided information about a store,
+    your task is two-fold. First, classify the provided evidence below into the most relevant category from the following: {classes}.
+    Second, if no relevant information has been found, classify the evidence into the most relevant supercategory from the following: {backup_classes}.
+    It's very important to omit unrelated pieces of evidence and don't make up any assumption.
+    Please think step by step, and must output in json format. An example output json is like {{"category": "..."}}
+    If no relevant piece of information can ever be found at all, simply output json with empty string "".
+    I'll tip you and guarantee a place in heaven if you do a great job completely according to my instruction.
+    """
+
+    if args.task == 'extract':
+        system_prompt = extraction_prompt
+    elif args.task == 'classify':
+        system_prompt = classification_prompt
+    else:
+        raise Exception("Invalid task")
+
+    query = "山の迴饗"
+    search_results = str([{"title": "山の迴饗", "snippet": "謝謝大家這麼支持山の迴饗 我們會繼續努力用心做出美味的料理 ————————— ⛰️ 山の迴饗地址:台東縣關山鎮中華路56號訂位專線:0975-957-056 · #山的迴饗 · #夢想起飛"}, {"title": "山的迴饗餐館- 店家介紹", "snippet": "營業登記資料 · 統一編號. 92433454 · 公司狀況. 營業中 · 公司名稱. 山的迴饗餐館 · 公司類型. 獨資 · 資本總額. 30000 · 所在地. 臺東縣關山鎮中福里中華路56號 · 使用發票."}, {"title": "關山漫遊| 💥山の迴饗x night bar", "snippet": "山の迴饗x night bar 即將在12/1號台東關山開幕! 別再煩惱池上、鹿野找不到宵夜餐酒館 各位敬請期待並關注我們✨ night bar❌山的迴饗 12/1 ..."}, {"title": "山的迴饗| 中西複合式餐廳|焗烤飯|義大利麵 - 台灣美食網", "snippet": "山的迴饗| 中西複合式餐廳|焗烤飯|義大利麵|台式三杯雞|滷肉飯|便當|CP美食營業時間 ; 星期一, 休息 ; 星期二, 10:00–14:00 16:00–21:00 ; 星期三, 10:00–14:00 16:00– ..."}, {"title": "便當|CP美食- 山的迴饗| 中西複合式餐廳|焗烤飯|義大利麵", "snippet": "餐廳山的迴饗| 中西複合式餐廳|焗烤飯|義大利麵|台式三杯雞|滷肉飯|便當|CP美食google map 導航. 臺東縣關山鎮中華路56號 +886 975 957 056 ..."}, {"title": "山的迴饗餐館", "snippet": "山的迴饗餐館,統編:92433454,地址:臺東縣關山鎮中福里中華路56號,負責人姓名:周偉慈,設立日期:112年11月15日."}, {"title": "山的迴饗餐館", "snippet": "山的迴饗餐館. 資本總額(元), 30,000. 負責人, 周偉慈. 登記地址, 看地圖 臺東縣關山鎮中福里中華路56號 郵遞區號查詢. 設立日期, 2023-11-15. 資料管理 ..."}, {"title": "山的迴饗餐館, 公司統一編號92433454 - 食品業者登錄資料集", "snippet": "公司或商業登記名稱山的迴饗餐館的公司統一編號是92433454, 登錄項目是餐飲場所, 業者地址是台東縣關山鎮中福里中華路56號, 食品業者登錄字號是V-202257990-00001-5."}, {"title": "山的迴饗餐館, 公司統一編號92433454 - 食品業者登錄資料集", "snippet": "公司或商業登記名稱山的迴饗餐館的公司統一編號是92433454, 登錄項目是公司/商業登記, 業者地址是台東縣關山鎮中福里中華路56號, 食品業者登錄字號是V-202257990-00000-4 ..."}, {"title": "山的迴饗餐館", "snippet": "負責人, 周偉慈 ; 登記地址, 台東縣關山鎮中福里中華路56號 ; 公司狀態, 核准設立 「查詢最新營業狀況請至財政部稅務入口網 」 ; 資本額, 30,000元 ; 所在縣市 ..."}, {"title": "山的迴饗 | 關山美食|焗烤飯|酒吧|義大利麵|台式三杯雞|滷肉飯|便當|CP美食", "顧客評價": "324晚餐餐點豬排簡餐加白醬焗烤等等餐點。\t店家也提供免費的紅茶 綠茶 白開水 多種的調味料自取 總而言之 CP值真的很讚\t空間舒適涼爽,店員服務周到"}, {"title": "類似的店", "snippet": "['中國菜']\t['客家料理']\t['餐廳']\t['熟食店']\t['餐廳']"}, {"telephone_number": "0975 957 056"}])
+
+    # query = "大吃一斤泰國蝦麻辣牛肉爐"
+    # search_results = str([{"title": "大吃一斤泰國蝦麻辣牛肉爐", "snippet": "... 一支、本店特賣價600元免費代料理、 保證、活的!歡迎來電預定0975-147-848大吃一斤活蝦料理店新北市三重區自強路一段222號泰國蝦活蝦現場料理不漲價一斤維持一斤480元."}, {"title": "大吃一斤泰國蝦麻辣牛肉爐", "snippet": "... 一支、本店特賣價600元免費代料理、 保證、活的!歡迎來電預定0975-147-848大吃一斤活蝦料理店新北市三重區自強路一段222號泰國蝦活蝦現場料理不漲價一斤維持一斤480元."}, {"title": "大吃一斤", "snippet": "大吃一斤在foodpanda點的到,更多New Taipei City 推薦美食,線上訂立即送,下載foodpanda APP,20分鐘外送上門!瀏覽菜單和獨家優惠折扣."}, {"title": "大吃一斤(新北板橋店)菜單", "snippet": "大吃一斤(新北板橋店) 在foodpanda點的到,更多New Taipei City 推薦美食,線上訂立即送,下載foodpanda APP,20分鐘外送上門!"}, {"title": "大吃一斤活蝦餐廳- 店家介紹", "snippet": "大吃一斤活蝦餐廳. 資本總額. 200000. 代表人. 李錦鴻. 所在區域. 新北市. 所在地. 新北市三重區自強路1段222號(1樓). 商業類型. 獨資. 異動紀錄. 1111108. 營業狀態為: ..."}, {"title": "新北市| 三重區大吃一斤(泰國蝦牛肉料理店)", "snippet": "大吃一斤(泰國蝦牛肉料理店) 餐廳介紹 ; phone icon 電話, 0975 147 848 ; 營業時間, 星期一17:00–04:00 星期二17:00–04:00 星期三17:00–04:00 星期四17:00– ..."}, {"title": "大吃一斤活蝦餐廳", "snippet": "大吃一斤活蝦餐廳. 負責人姓名, 李錦鴻. 地址, 新北市三重區自強路1段222號(1樓). 現況, 核准設立. 資本額(元), 200,000. 組織類型, 獨資. 登記機關, 新北市政府經濟發展局."}, {"title": "【大吃一斤(泰國蝦牛肉料理店)】網友評價- 新北三重區合菜餐廳", "snippet": "大吃一斤(泰國蝦牛肉料理店) - 網友評論、最新食記(132則) 評分: 4.4分。大吃一斤(泰國蝦牛肉料理店)是位於新北三重區的餐廳,地址: 新北市 ... 生猛活海鮮."}, {"title": "大吃一斤生猛海鮮/活魚料理超值優惠方案", "snippet": "大吃一斤生猛海鮮/活魚料理. 電話:0975-147-848. 地址:新北市三重區自強路一段222號. 營業時間:週一至週日17: ..."}, {"title": "大吃一斤三重店 (泰國蝦料理.平價快炒熱炒.各式海鮮)", "顧客評價": "塔香蛤蜊、胡椒蝦、檸檬蝦、胡椒鳳螺 口味不錯食材新鮮 拍照時蛤蜊已經快被小孩吃光\t蝦子不大,店面不大,魚腥味很重,廁所很多蚊子,連菜裡面也有蚊子🦟,根本吃不下去\t新鮮好吃😋老闆人很Nice 推薦鹽烤蝦以及蒜味奶油蝦👍👍👍"}, {"title": "類似的店", "snippet": "['海鮮']\t['海鮮']\t['海鮮']\t['海鮮']"}, {"telephone_number": "0975 147 848"}])
+
+    if args.provider == "openai":
+        client = OpenAI( organization = os.getenv('ORGANIZATION_ID'))
+        # categories = ", ".join([ "`"+x+"`" for x in args.classes if x!='早餐' ])+ " or " + "`早餐`"
+        user_content = f'''
+            `query`: `{query}`,
+            `search_results`: {search_results}
+        '''
+        resp = llm( args.provider, args.model, system_prompt, user_content)
+        print(f"resp -> {resp}")
+
+
+    elif args.provider == "anthropic":
+        client = anthropic.Client(api_key=os.getenv('ANTHROPIC_APIKEY'))
+        user_content = f'''
+            `query`: `{query}`,
+            `search_results`: {search_results}
+        '''
+        print(f"user_content -> {user_content}")
+        resp = llm( args.provider, args.model, system_prompt, user_content)
+        print(resp)
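
The new llm() wrapper gives the rest of the repo a single entry point for both providers. A minimal usage sketch, assuming ANTHROPIC_APIKEY is present in .env as the module expects; the prompt strings below are illustrative, not taken from the repo:

    from model import llm

    # One provider-agnostic call; the system prompt asks for JSON so the
    # reply can be handed to utils.parse_json_garbage downstream.
    resp = llm(
        provider = "anthropic",
        model = "claude-3-sonnet-20240229",
        system_prompt = 'Classify the store evidence and output json like {"category": "..."}.',
        user_content = "`evidence`: `鹽烤蝦、胡椒蝦、活蝦料理`",
    )
    print(resp)

Note that the openai branch requests response_format={"type": "json_object"}, while the anthropic branch relies on the system prompt alone and caps output at max_tokens=1024.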
sheet.py CHANGED
@@ -14,6 +14,9 @@ import tiktoken
 from openai import OpenAI
 from tqdm import tqdm
 
+from model import llm
+from utils import parse_json_garbage
+
 load_dotenv()
 ORGANIZATION_ID = os.getenv('OPENAI_ORGANIZATION_ID')
 SERP_API_KEY = os.getenv('SERP_APIKEY')
@@ -69,69 +72,45 @@ def get_condensed_result(result):
     # print( condensed_results )
     return condensed_result
 
-
-def compose_analysis( client, query, search_results, classes: list, model: str = 'gpt-3.5-turbo-0125'):
+def compose_extraction( query, search_results, classes: list, provider: str, model: str):
     """
     Argument
         query: str
         search_results: str
+        system_prompt: str
+        classes: list, `小吃店`, `日式料理(含居酒屋,串燒)`, `火(鍋/爐)`, `東南亞料理(不含日韓)`, `海鮮熱炒`, `特色餐廳(含雞、鵝、牛、羊肉)`, `傳統餐廳`, `燒烤`, `韓式料理(含火鍋,烤肉)`, `西餐廳(含美式,義式,墨式)`, `西餐廳(餐酒館、酒吧、飛鏢吧、pub、lounge bar)`, `西餐廳(土耳其、漢堡、薯條、法式、歐式、印度)` or `早餐`
+        provider: "openai"
         model: "gpt-4-0125-preview" or 'gpt-3.5-turbo-0125'
     Return
         response: str
     """
-    categories = ", ".join([ "`"+x+"`" for x in classes if x!='早餐' ])+ " or " + "`早餐`"
-    # print(f"categoreis: {categories}")
-    system_prompt = '''
-        As a helpful and rigorous retail analyst, given the provided query and a list of search results for the query,
-        your task is to first identify relevant information of the identical store based on store name and proxmity of address if known. After that, extract `store_name`, `address`, `description`, `category` and `phone_number` from the found relevant information, where `category` can only be `小吃店`, `日式料理(含居酒屋,串燒)`, `火(鍋/爐)`, `東南亞料理(不含日韓)`, `海鮮熱炒`, `特色餐廳(含雞、鵝、牛、羊肉)`, `傳統餐廳`, `燒烤`, `韓式料理(含火鍋,烤肉)`, `西餐廳(含美式,義式,墨式)`, `西餐廳(餐酒館、酒吧、飛鏢吧、pub、lounge bar)`, `西餐廳(土耳其、漢堡、薯條、法式、歐式、印度)` or `早餐`.
-        It's very important to omit unrelated results. Do not make up any assumption.
-        Please think step by step, and output in json format. An example output json is like {"store_name": "...", "address": "...", "description": "... products, service or highlights ...", "category": "...", "phone_number": "..."}
-        If no relevant information has been found, simply output json with empty values.
-        I'll tip you and guarantee a place in heaven you do a great job completely according to my instruction.
-    '''
-    # print(f"system prompt = {system_prompt}")
-    chat_completion = client.chat.completions.create(
-        messages=[
-            {
-                "role": "system",
-                "content": system_prompt
-            },
-            {
-                "role": "user",
-                "content": f'''
-                    `query`: `{query}`,
-                    `search_results`: {search_results}
-                ''',
-            }
-        ],
+    classes = ", ".join([ "`"+x+"`" for x in classes if x!='早餐' ])+ " or " + "`早餐`"
+    system_prompt = f'''
+        As a helpful and rigorous retail analyst, given the provided query and a list of search results for the query,
+        your task is to first identify relevant information of the identical store based on store name and proximity of address if known. After that, extract `store_name`, `address`, `description`, `category` and `phone_number` from the found relevant information, where `category` can only be {classes}.
+        It's very important to omit unrelated results. Do not make up any assumption.
+        Please think step by step, and output in json format. An example output json is like {{"store_name": "...", "address": "...", "description": "... products, service or highlights ...", "category": "...", "phone_number": "..."}}
+        If no relevant information has been found, simply output json with empty values.
+        I'll tip you and guarantee a place in heaven if you do a great job completely according to my instruction.
+    '''
+    user_content = f"`query`: `{query}`\n`search_results`: {search_results}"
+    response = llm(
+        provider = provider,
         model = model,
-        response_format = {"type": "json_object"},
-        temperature = 0,
-        # stream = True
+        system_prompt = system_prompt,
+        user_content = user_content
    )
-    # response = []
-    # for chunk in chat_completion:
-    #     text = chunk.choices[0].delta.content or ""
-    #     response.append(text)
-    #     print( text, end="")
-    # return "".join(response)
-    response = chat_completion.choices[0].message.content
     return response
 
 
-def compose_classication(
-        client,
-        evidence,
-        classes: list = ['小吃店', '日式料理(含居酒屋,串燒)', '火(鍋/爐)', '東南亞料理(不含日韓)', '海鮮熱炒', '特色餐廳(含雞、鵝、牛、羊肉)', '傳統餐廳', '燒烤', '韓式料理(含火鍋,烤肉)', '西餐廳(含美式,義式,墨式)', ],
-        backup_classes: list = [ '中式', '西式'],
-        model: str = 'gpt-3.5-turbo-0125'
-    ) -> str:
+def compose_classication( user_content, classes: list, backup_classes: list, provider: str, model: str) -> str:
     """
     Argument
         client:
         evidence: str
         classes: list
-        model: 'gpt-3.5-turbo-0125', 'gpt-4-0125-preview'
+        provider: e.g. 'openai'
+        model: e.g. 'gpt-3.5-turbo-0125', 'gpt-4-0125-preview'
     Return
         response: str
     """
@@ -141,44 +120,34 @@ def compose_classication(
         pass
     else:
         raise Exception(f"Incorrect classes type: {type(classes)}")
-    chat_completion = client.chat.completions.create(
-        messages=[
-            {
-                "role": "system",
-                "content": f'''
-                    As a helpful and rigorous retail analyst, given the provided information about a store,
-                    your task is two-fold. First, classify provided evidence below into the mostly relevant category from the following: {classes}.
-                    Second, if no relevant information has been found, classify the evidence into the mostly relevant supercategory from the following: {backup_classes}.
-                    It's very important to omit unrelated piece of evidence and don't make up any assumption.
-                    Please think step by step, and output in json format. An example output json is like {{"category": "..."}}
-                    If no relevant piece of information can ever be found at all, simply output json with empty string "".
-                    I'll tip you and guarantee a place in heaven you do a great job completely according to my instruction.
-                '''
-            },
-            {
-                "role": "user",
-                "content": f'''
-                    `evidence`: `{evidence}`
-                ''',
-            }
-        ],
+    system_prompt = f"""
+        As a helpful and rigorous retail analyst, given the provided information about a store,
+        your task is two-fold. First, classify the provided evidence below into the most relevant category from the following: {classes}.
+        Second, if no relevant information has been found, classify the evidence into the most relevant supercategory from the following: {backup_classes}.
+        It's very important to omit unrelated pieces of evidence and don't make up any assumption.
+        Please think step by step, and must output in json format. An example output json is like {{"category": "..."}}
+        If no relevant piece of information can ever be found at all, simply output json with empty string "".
+        I'll tip you and guarantee a place in heaven if you do a great job completely according to my instruction.
+    """
+    response = llm(
+        provider = provider,
         model = model,
-        response_format = {"type": "json_object"},
-        temperature = 0,
-        # stream = True
+        system_prompt = system_prompt,
+        user_content = user_content,
    )
-    response = chat_completion.choices[0].message.content
     return response
 
 
 def classify_results(
     analysis_results: pd.DataFrame,
+    classes: list,
+    backup_classes: list,
+    provider: str,
+    model: str,
     input_column: str = 'evidence',
     output_column: str = 'classified_category',
-    classes: list = ['小吃店', '日式料理(含居酒屋,串燒)', '火(鍋/爐)', '東南亞料理(不含日韓)', '海鮮熱炒', '特色餐廳(含雞、鵝、牛、羊肉)', '傳統餐廳', '燒烤', '韓式料理(含火鍋,烤肉)', '西餐廳(含美式,義式,墨式)'],
-    backup_classes: list = [ '中式', '西式']
     ):
-    """
+    """Classify the results
     Argument
         analysis_results: dataframe
         input_column: str
@@ -187,13 +156,13 @@ def classify_results(
     Return
         analysis_results: dataframe
     """
-    client = OpenAI( organization = ORGANIZATION_ID)
     classified_results = analysis_results.copy()
-    empty_indices = []
-    labels = []
+    labels, empty_indices = [], []
     for idx, evidence in zip( analysis_results['index'], analysis_results[input_column]):
         try:
-            label = json.loads(compose_classication( client, evidence, classes=classes, backup_classes=backup_classes))['category']
+            user_content = f'''`evidence`: `{evidence}`'''
+            pred_cls = compose_classication( user_content, classes=classes, backup_classes=backup_classes, provider=provider, model=model)
+            label = parse_json_garbage(pred_cls)['category']
             labels.append(label)
         except Exception as e:
             print(f"# CLASSIFICATION error -> evidence: {e}")
@@ -206,13 +175,15 @@
         "empty_indices": empty_indices
     }
 
-def classify_results_mp( extracted_results: pd.DataFrame, classified_file_path, classes, backup_classes, n_processes: int = 4):
+def classify_results_mp( extracted_results: pd.DataFrame, classified_file_path: str, classes: list, backup_classes: list, provider: str, model: str, n_processes: int = 4):
     """
     Argument
         extracted_results:
        classified_file_path:
-        classes: ['小吃店', '日式料理(含居酒屋,串燒)', '火(鍋/爐)', '東南亞料理(不含日韓)', '海鮮熱炒', '特色餐廳(含雞、鵝、牛、羊肉)', '傳統餐廳', '燒烤', '韓式料理(含火鍋,烤肉)', '西餐廳(含美式,義式,墨式)']
-        backup_classes: [ '中式', '西式']
+        classes: e.g. ['小吃店', '日式料理(含居酒屋,串燒)', '火(鍋/爐)', '東南亞料理(不含日韓)', '海鮮熱炒', '特色餐廳(含雞、鵝、牛、羊肉)', '傳統餐廳', '燒烤', '韓式料理(含火鍋,烤肉)', '西餐廳(含美式,義式,墨式)']
+        backup_classes: e.g. [ '中式', '西式']
+        provider:
+        model:
         n_processes: int
     Return
         classified_results: dataframe
@@ -228,10 +199,9 @@ def classify_results_mp( extracted_results: pd.DataFrame, classified_file_path,
             classify_results,
             [ (
                 d,
-                'evidence',
-                'classified_category',
-                classes,
-                backup_classes
+                classes, backup_classes,
+                provider, model,
+                'evidence', 'classified_category',
             ) for d in split_data]
         )
         classified_results = merge_results( classified_results, dataframe_columns=['classified_results'], list_columns=['empty_indices'])
@@ -329,16 +299,14 @@ def crawl_results_mp( data: pd.DataFrame, crawl_file_path: str, n_processes: int
     print( f"total time: {time.time() - st}")
     return crawled_results
 
-def extract_results( data: pd.DataFrame, classes: list ):
+def extract_results( data: pd.DataFrame, classes: list, provider: str, model: str):
     """
     Argument
         data: `evidence`, `result`
     Return
        extracted_results: dataframe of `extracted_evidence`
    """
-    client = OpenAI( organization = ORGANIZATION_ID)
-    extracted_results = []
-    empty_indices = []
+    extracted_results, empty_indices, ext_res = [], [], []
    for i, d in tqdm(enumerate(data.itertuples())):
        idx = d[1]
        evidence = d.evidence
@@ -348,10 +316,10 @@ def extract_results( data: pd.DataFrame, classes: list ):
         ana_res = None
         query = compose_query( address, business_name)
         try:
-            ana_res = compose_analysis( client, query = query, search_results = evidence, classes = classes)
-            ana_res = json.loads(ana_res)
+            ext_res = compose_extraction( query = query, search_results = evidence, classes = classes, provider = provider, model = model)
+            ext_res = parse_json_garbage(ext_res)
         except Exception as e:
-            print(f"# ANALYSIS error {e}: i = {i}, ana_res = {ana_res}")
+            print(f"# ANALYSIS error: e = {e}, i = {i}, q = {query}, ext_res = {ext_res}")
             empty_indices.append(i)
             continue
 
@@ -360,7 +328,7 @@ def extract_results( data: pd.DataFrame, classes: list ):
             "business_id": business_id,
             "business_name": business_name,
             "evidence": evidence,
-            ** ana_res
+            ** ext_res
         } )
     extracted_results = pd.DataFrame(extracted_results)
 
@@ -369,9 +337,12 @@ def extract_results( data: pd.DataFrame, classes: list ):
         "empty_indices": empty_indices
     }
 
-def extract_results_mp( crawled_results, extracted_file_path, classes: list):
+def extract_results_mp( crawled_results, extracted_file_path, classes: list, provider: str, model: str, n_processes: int = 4):
     """
     Argument
+        crawled_results: dataframe
+        extracted_file_path
+        classes: list
     Return
     Reference
         200 records, 4 processes, 502.26914715766907
@@ -380,8 +351,8 @@ def extract_results_mp( crawled_results, extracted_file_path, classes: list):
     # args.extracted_file_path = "data/extracted_results.joblib"
     if not os.path.exists(extracted_file_path):
         split_data = split_dataframe( crawled_results)
-        with mp.Pool(args.n_processes) as pool:
-            extracted_results = pool.starmap( extract_results, [ (x, classes) for x in split_data])
+        with mp.Pool(n_processes) as pool:
+            extracted_results = pool.starmap( extract_results, [ (x, classes, provider, model) for x in split_data])
         extracted_results = merge_results( extracted_results, dataframe_columns=['extracted_results'], list_columns=['empty_indices'])
         with open( extracted_file_path, "wb") as f:
             joblib.dump( extracted_results, f)
@@ -522,57 +493,59 @@ def main(args):
     Argument
         args: argparse
     """
-
+    crawled_file_path = os.path.join( args.output_dir, args.crawled_file_path)
+    extracted_file_path = os.path.join( args.output_dir, args.extracted_file_path)
+    classified_file_path = os.path.join( args.output_dir, args.classified_file_path)
+    combined_file_path = os.path.join( args.output_dir, args.combined_file_path)
+    postprocessed_results = os.path.join( args.output_dir, args.postprocessed_results)
+    formatted_results_path = os.path.join( args.output_dir, args.formatted_results_path)
+
     ## 讀取資料名單 ##
-    data = get_leads(args.data_path).tail(20)
+    data = get_leads(args.data_path).tail(5)
 
     ## 進行爬蟲與分析 ##
-    # crawled_results = crawl_results(data)
-    crawled_results = crawl_results_mp( data, args.crawled_file_path, n_processes=args.n_processes)
+    crawled_results = crawl_results_mp( data, crawled_file_path, n_processes=args.n_processes)
+    crawled_results = { k:v[-5:] for k,v in crawled_results.items()}
 
     ## 方法 1: 擷取關鍵資訊與分類 ##
-    # extracted_results = extract_results(
-    #     crawled_results['crawled_results']
-    # )
     extracted_results = extract_results_mp(
         crawled_results = crawled_results['crawled_results'],
-        extracted_file_path = args.extracted_file_path
+        extracted_file_path = extracted_file_path,
+        classes = args.classes,
+        provider = args.provider,
+        model = args.model,
+        n_processes = args.n_processes
     )
 
     ## 方法2: 直接對爬蟲結果分類 ##
-    # classified_results = classify_results(
-    #     extracted_results['extracted_results'],
-    #     input_column = 'evidence',
-    #     output_column = 'classified_category',
-    #     classes = ['中式', '西式'],
-    #     backup_classes = [ '中式', '西式']
-    # )
     classified_results = classify_results_mp(
         extracted_results['extracted_results'],
-        args.classified_file_path,
-        classes=args.classes,
-        backup_classes=args.backup_classes,
-        n_processes=args.n_processes
+        classified_file_path,
+        classes = args.classes,
+        backup_classes = args.backup_classes,
+        provider = args.provider,
+        model = args.model,
+        n_processes = args.n_processes
    )
 
     ## 合併分析結果 ##
     combined_results = combine_results(
         classified_results['classified_results'],
-        args.combined_file_path,
-        src_column='classified_category',
-        tgt_column='category',
-        strategy='replace'
+        combined_file_path,
+        src_column = 'classified_category',
+        tgt_column = 'category',
+        strategy = args.strategy
     )
 
     ## 後處理分析結果 ##
     postprossed_results = postprocess_result(
         combined_results,
-        args.postprocessed_results,
+        postprocessed_results,
         category2supercategory
     )
 
     formatted_results = format_output( postprossed_results, input_column = 'evidence', output_column = 'formatted_evidence', format_func = format_evidence)
-    formatted_results.to_csv( args.formatted_results, index=False)
+    formatted_results.to_csv( formatted_results_path, index=False)
 
 
 category2supercategory = {
@@ -623,15 +596,18 @@ if __name__=='__main__':
 
     parser = argparse.ArgumentParser()
     parser.add_argument("--data_path", type=str, default="data/餐廳類型分類.xlsx - 測試清單.csv")
-    parser.add_argument("--classified_file_path", type=str, default="data/gpt3.5/classified_results.joblib")
-    parser.add_argument("--extracted_file_path", type=str, default="data/gpt3.5/extracted_results.joblib")
-    parser.add_argument("--crawled_file_path", type=str, default="data/gpt3.5/crawled_results.joblib")
-    parser.add_argument("--combined_file_path", type=str, default="data/gpt3.5/combined_results.joblib")
-    parser.add_argument("--postprocessed_results", type=str, default="data/gpt3.5/postprocessed_results.joblib")
-    parser.add_argument("--formatted_results", type=str, default="data/gpt3.5/formatted_results.csv")
+    parser.add_argument("--output_dir", type=str, help='output directory')
+    parser.add_argument("--classified_file_path", type=str, default="classified_results.joblib")
+    parser.add_argument("--extracted_file_path", type=str, default="extracted_results.joblib")
+    parser.add_argument("--crawled_file_path", type=str, default="crawled_results.joblib")
+    parser.add_argument("--combined_file_path", type=str, default="combined_results.joblib")
+    parser.add_argument("--postprocessed_results", type=str, default="postprocessed_results.joblib")
+    parser.add_argument("--formatted_results_path", type=str, default="formatted_results.csv")
     parser.add_argument("--classes", type=list, default=['小吃店', '日式料理(含居酒屋,串燒)', '火(鍋/爐)', '東南亞料理(不含日韓)', '海鮮熱炒', '特色餐廳(含雞、鵝、牛、羊肉)', '傳統餐廳', '燒烤', '韓式料理(含火鍋,烤肉)', '西餐廳(含美式,義式,墨式)', '西餐廳(餐酒館、酒吧、飛鏢吧、pub、lounge bar)', '西餐廳(土耳其、漢堡、薯條、法式、歐式、印度)', '早餐'])
     parser.add_argument("--backup_classes", type=list, default=['中式', '西式'])
-    parser.add_argument("--strategy", type=str, default='replace', choices=['replace', 'patch'])
+    parser.add_argument("--strategy", type=str, default='patch', choices=['replace', 'patch'])
+    parser.add_argument("--provider", type=str, default='anthropic', choices=['openai', 'anthropic'])
+    parser.add_argument("--model", type=str, default='claude-3-sonnet-20240229', choices=['claude-3-sonnet-20240229', 'claude-3-haiku-20240307', 'gpt-3.5-turbo-0125', 'gpt-4-0125-preview'])
    parser.add_argument("--n_processes", type=int, default=4)
    args = parser.parse_args()
 
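With provider and model now threaded through every step, a single extraction can be exercised outside the multiprocessing pipeline. A hedged sketch, with a made-up query and a truncated stand-in for the crawled search_results string:

    from sheet import compose_extraction
    from utils import parse_json_garbage

    # classes mirrors a slice of the --classes default; provider and model
    # follow the new CLI defaults introduced in this commit.
    resp = compose_extraction(
        query = "山の迴饗 台東縣關山鎮中華路56號",
        search_results = '[{"title": "山の迴饗", "snippet": "..."}]',
        classes = ['小吃店', '燒烤', '早餐'],
        provider = "anthropic",
        model = "claude-3-sonnet-20240229",
    )
    print( parse_json_garbage(resp))
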
utils.py ADDED
@@ -0,0 +1,9 @@
+import json
+
+def parse_json_garbage(s):
+    s = s[next(idx for idx, c in enumerate(s) if c in "{["):]
+    try:
+        return json.loads(s)
+    except json.JSONDecodeError as e:
+        return json.loads(s[:e.pos])
+
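
parse_json_garbage drops any prose before the first "{" or "[", and if trailing chatter still breaks decoding it retries on the prefix up to the reported error position; note it raises StopIteration when the string contains no bracket at all. A small self-check, with a made-up wrapper sentence:

    from utils import parse_json_garbage

    raw = 'Sure, here is the result: {"category": "燒烤"} Hope this helps!'
    print( parse_json_garbage(raw))   # -> {'category': '燒烤'}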