mrhblfx committed on
Commit
6d2807f
2 Parent(s): c3cafd8 ae24fab

Merge branch 'binary-husky:master' into new_code_fun

Browse files
.gitignore CHANGED
@@ -145,3 +145,4 @@ cradle*
145
  debug*
146
  private*
147
  crazy_functions/test_project/pdf_and_word
 
 
145
  debug*
146
  private*
147
  crazy_functions/test_project/pdf_and_word
148
+ crazy_functions/test_samples
crazy_functional.py CHANGED
@@ -19,12 +19,18 @@ def get_crazy_functions():
19
  from crazy_functions.解析项目源代码 import 解析一个Lua项目
20
  from crazy_functions.解析项目源代码 import 解析一个CSharp项目
21
  from crazy_functions.总结word文档 import 总结word文档
 
22
  function_plugins = {
23
 
24
  "解析整个Python项目": {
25
  "Color": "stop", # 按钮颜色
26
  "Function": HotReload(解析一个Python项目)
27
  },
 
 
 
 
 
28
  "批量总结Word文档": {
29
  "Color": "stop",
30
  "Function": HotReload(总结word文档)
 
19
  from crazy_functions.解析项目源代码 import 解析一个Lua项目
20
  from crazy_functions.解析项目源代码 import 解析一个CSharp项目
21
  from crazy_functions.总结word文档 import 总结word文档
22
+ from crazy_functions.解析JupyterNotebook import 解析ipynb文件
23
  function_plugins = {
24
 
25
  "解析整个Python项目": {
26
  "Color": "stop", # 按钮颜色
27
  "Function": HotReload(解析一个Python项目)
28
  },
29
+ "[测试功能] 解析Jupyter Notebook文件": {
30
+ "Color": "stop",
31
+ "AsButton":False,
32
+ "Function": HotReload(解析ipynb文件),
33
+ },
34
  "批量总结Word文档": {
35
  "Color": "stop",
36
  "Function": HotReload(总结word文档)
crazy_functions/crazy_functions_test.py CHANGED
@@ -108,6 +108,13 @@ def test_联网回答问题():
108
  print("当前问答:", cb[-1][-1].replace("\n"," "))
109
  for i, it in enumerate(cb): print亮蓝(it[0]); print亮黄(it[1])
110
 
 
 
 
 
 
 
 
111
  # test_解析一个Python项目()
112
  # test_Latex英文润色()
113
  # test_Markdown中译英()
@@ -116,9 +123,8 @@ def test_联网回答问题():
116
  # test_总结word文档()
117
  # test_下载arxiv论文并翻译摘要()
118
  # test_解析一个Cpp项目()
119
-
120
- test_联网回答问题()
121
-
122
 
123
  input("程序完成,回车退出。")
124
  print("退出。")
 
108
  print("当前问答:", cb[-1][-1].replace("\n"," "))
109
  for i, it in enumerate(cb): print亮蓝(it[0]); print亮黄(it[1])
110
 
111
+ def test_解析ipynb文件():
112
+ from crazy_functions.解析JupyterNotebook import 解析ipynb文件
113
+ txt = "crazy_functions/test_samples"
114
+ for cookies, cb, hist, msg in 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
115
+ print(cb)
116
+
117
+
118
  # test_解析一个Python项目()
119
  # test_Latex英文润色()
120
  # test_Markdown中译英()
 
123
  # test_总结word文档()
124
  # test_下载arxiv论文并翻译摘要()
125
  # test_解析一个Cpp项目()
126
+ # test_联网回答问题()
127
+ test_解析ipynb文件()
 
128
 
129
  input("程序完成,回车退出。")
130
  print("退出。")
crazy_functions/解析JupyterNotebook.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from toolbox import update_ui
2
+ from toolbox import CatchException, report_execption, write_results_to_file
3
+ fast_debug = True
4
+
5
+
6
class PaperFileGroup():
    """Holds notebook files and splits over-long contents by token count.

    `file_paths` / `file_contents` store whole files; after `run_file_split`
    the `sp_*` lists hold fragments, the index of the file each fragment came
    from, and a display tag per fragment.
    """

    def __init__(self):
        self.file_paths = []
        self.file_contents = []
        self.sp_file_contents = []
        self.sp_file_index = []
        self.sp_file_tag = []

        # Bind a token counter using the gpt-3.5-turbo tokenizer; it decides
        # whether a file needs splitting.
        from request_llm.bridge_all import model_info
        tokenizer = model_info["gpt-3.5-turbo"]['tokenizer']

        def count_tokens(text):
            return len(tokenizer.encode(text, disallowed_special=()))

        self.get_token_num = count_tokens

    def run_file_split(self, max_token_limit=1900):
        """
        Split any file whose token count reaches `max_token_limit`;
        shorter files pass through as a single fragment.
        """
        for idx, content in enumerate(self.file_contents):
            if self.get_token_num(content) < max_token_limit:
                # Short enough: keep the whole file as one fragment.
                self.sp_file_contents.append(content)
                self.sp_file_index.append(idx)
                self.sp_file_tag.append(self.file_paths[idx])
            else:
                from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
                pieces = breakdown_txt_to_satisfy_token_limit_for_pdf(
                    content, self.get_token_num, max_token_limit)
                for part_no, piece in enumerate(pieces):
                    self.sp_file_contents.append(piece)
                    self.sp_file_index.append(idx)
                    self.sp_file_tag.append(
                        self.file_paths[idx] + f".part-{part_no}.txt")
39
+
40
+
41
+
42
def parseNotebook(filename, enable_markdown=1):
    """Flatten a Jupyter notebook (.ipynb JSON) into one plain-text string.

    Args:
        filename: path to the notebook file.
        enable_markdown: when truthy, markdown cells are included as well,
            each prefixed with "Markdown:"; otherwise only code cells.

    Returns:
        A single string where every kept cell is preceded by a
        "This is {i}th code block: " header (1-based numbering).
    """
    import json

    code_blocks = []
    with open(filename, 'r', encoding='utf-8', errors='replace') as f:
        notebook = json.load(f)
    # .get() keeps us robust against notebooks missing 'cells'/'source' keys
    # (the original indexed directly and could raise KeyError).
    for cell in notebook.get('cells', []):
        if not cell.get('source'):
            continue
        # Drop blank lines so the prompt stays compact.
        lines = [line for line in cell['source'] if line.strip() != '']
        if cell['cell_type'] == 'code':
            code_blocks.append("".join(lines))
        elif enable_markdown and cell['cell_type'] == 'markdown':
            code_blocks.append("Markdown:" + "".join(lines))

    # Single join instead of repeated += (avoids quadratic concatenation).
    return "".join(
        f"This is {idx + 1}th code block: \n" + code + "\n"
        for idx, code in enumerate(code_blocks)
    )
65
+
66
+
67
def ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    """Explain every notebook in `file_manifest` block-by-block via the LLM.

    Generator: yields UI refreshes through `update_ui`; analysis results are
    appended to `chatbot`/`history` and written to a report file at the end.
    """
    from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency

    pfg = PaperFileGroup()

    print(file_manifest)
    for fp in file_manifest:
        # Flatten each notebook (code + markdown cells) into plain text.
        file_content = parseNotebook(fp, enable_markdown=1)
        pfg.file_paths.append(fp)
        pfg.file_contents.append(file_content)

    # <-------- split over-long notebook contents ---------->
    pfg.run_file_split(max_token_limit=1024)
    n_split = len(pfg.sp_file_contents)

    # Fixed prompt typo: duplicated word "ipynbipynb" -> "ipynb".
    inputs_array = [r"This is a Jupyter Notebook file, tell me about Each Block in Chinese. Focus Just On Code." +
                    r"If a block starts with `Markdown` which means it's a markdown block in ipynb. " +
                    r"Start a new line for a block and block num use Chinese." +
                    f"\n\n{frag}" for frag in pfg.sp_file_contents]
    inputs_show_user_array = [f"{f}的分析如下" for f in pfg.sp_file_tag]
    sys_prompt_array = ["You are a professional programmer."] * n_split

    gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
        inputs_array=inputs_array,
        inputs_show_user_array=inputs_show_user_array,
        llm_kwargs=llm_kwargs,
        chatbot=chatbot,
        history_array=[[""] for _ in range(n_split)],
        sys_prompt_array=sys_prompt_array,
        # max_workers=5,  # maximum parallelism allowed by OpenAI
        scroller_max_len=80
    )

    # <-------- collect results, refresh UI ---------->
    block_result = " \n".join(gpt_response_collection)
    chatbot.append(("解析的结果如下", block_result))
    history.extend(["解析的结果如下", block_result])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh UI

    # <-------- write report file, exit ---------->
    res = write_results_to_file(history)
    chatbot.append(("完成了吗?", res))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh UI
110
+
111
@CatchException
def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    """Plugin entry point: resolve `txt` to one or more .ipynb files and explain them."""
    chatbot.append([
        "函数插件功能?",
        "对IPynb文件进行解析。Contributor: codycjy."])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh UI

    history = []  # start from a clean history
    import glob
    import os

    # Guard clause: reject a missing or empty input path with an error card.
    if not os.path.exists(txt):
        if txt == "":
            txt = '空空如也的输入栏'
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh UI
        return
    project_folder = txt

    # Single notebook file vs. recursive directory scan.
    if txt.endswith('.ipynb'):
        file_manifest = [txt]
    else:
        file_manifest = list(glob.glob(f'{project_folder}/**/*.ipynb', recursive=True))
    if not file_manifest:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}", b=f"找不到任何.ipynb文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh UI
        return
    yield from ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, )