R1ckShi commited on
Commit
1179dad
1 Parent(s): 1427ef7

update introduction

Browse files
Files changed (2) hide show
  1. app.py +5 -5
  2. introduction.py +40 -0
app.py CHANGED
@@ -8,7 +8,7 @@ import logging
8
  import gradio as gr
9
  from funasr import AutoModel
10
  from videoclipper import VideoClipper
11
- from introduction import top_md_1, top_md_3, top_md_4
12
  from llm.openai_api import openai_call
13
  from llm.g4f_openai_api import g4f_openai_call
14
  from llm.qwen_api import call_qwen_model
@@ -123,10 +123,10 @@ if __name__ == "__main__":
123
  # gradio interface
124
  theme = gr.Theme.load("utils/theme.json")
125
  with gr.Blocks(theme=theme) as funclip_service:
126
- gr.Markdown(top_md_1)
127
- # gr.Markdown(top_md_2)
128
- gr.Markdown(top_md_3)
129
- gr.Markdown(top_md_4)
130
  video_state, audio_state = gr.State(), gr.State()
131
  with gr.Row():
132
  with gr.Column():
 
8
  import gradio as gr
9
  from funasr import AutoModel
10
  from videoclipper import VideoClipper
11
+ from introduction import *
12
  from llm.openai_api import openai_call
13
  from llm.g4f_openai_api import g4f_openai_call
14
  from llm.qwen_api import call_qwen_model
 
123
  # gradio interface
124
  theme = gr.Theme.load("utils/theme.json")
125
  with gr.Blocks(theme=theme) as funclip_service:
126
+ gr.Markdown(top_md_1_en)
127
+ gr.Markdown(top_md_2_en)
128
+ gr.Markdown(top_md_3_en)
129
+ gr.Markdown(top_md_4_en)
130
  video_state, audio_state = gr.State(), gr.State()
131
  with gr.Row():
132
  with gr.Column():
introduction.py CHANGED
@@ -17,6 +17,8 @@ top_md_1 = ("""
17
  🔥 FunClip现在集成了大语言模型智能剪辑功能,选择LLM模型进行体验吧~
18
  """)
19
 
 
 
20
  top_md_3 = ("""访问FunASR项目与论文能够帮助您深入了解ParaClipper中所使用的语音处理相关模型:
21
  <div align="center">
22
  <div style="display:flex; gap: 0.25rem;" align="center">
@@ -35,3 +37,41 @@ top_md_4 = ("""我们在「LLM智能裁剪」模块中提供三种LLM调用方
35
  其中方式1与方式2需要在界面中传入相应的apikey
36
  方式3则可能非常不稳定,返回时间可能很长或者结果获取失败,可以多多尝试或者自己准备sk使用方式1
37
  """)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
  🔥 FunClip现在集成了大语言模型智能剪辑功能,选择LLM模型进行体验吧~
18
  """)
19
 
20
+ top_md_2 = ("通过Modelscope创空间的访问可能由于多人同时使用的原因不稳定,推荐通过github项目中的代码自己部署。")
21
+
22
  top_md_3 = ("""访问FunASR项目与论文能够帮助您深入了解ParaClipper中所使用的语音处理相关模型:
23
  <div align="center">
24
  <div style="display:flex; gap: 0.25rem;" align="center">
 
37
  其中方式1与方式2需要在界面中传入相应的apikey
38
  方式3则可能非常不稳定,返回时间可能很长或者结果获取失败,可以多多尝试或者自己准备sk使用方式1
39
  """)
40
+
41
+ top_md_1_en = ("""
42
+ <div align="center">
43
+ <div style="display:flex; gap: 0.25rem;" align="center">
44
+ FunClip: <a href='https://github.com/alibaba-damo-academy/FunClip'><img src='https://img.shields.io/badge/Github-Code-blue'></a>
45
+ 🌟 Support Us: <a href='https://github.com/alibaba-damo-academy/FunClip/stargazers'><img src='https://img.shields.io/github/stars/alibaba-damo-academy/FunClip.svg?style=social'></a>
46
+ </div>
47
+ </div>
48
+
49
+ Powered by Alibaba DAMO Academy's in-house developed and open-sourced [FunASR](https://github.com/alibaba-damo-academy/FunASR) toolkit and the Paraformer series models, offering capabilities including voice recognition, endpoint detection, punctuation prediction, timestamp prediction, speaker differentiation, and customization of hot words.
50
+ Accurate recognition, freely copy the required paragraphs, or set speaker labels, one-click clipping, adding subtitles
51
+ * Step1: Upload a video or audio file (or use the case below for a demo), then click **<font color="#f7802b">Recognize</font>** button
52
+ * Step2: Copy the required text from the recognition results to the top right, or set speaker labels on the right and configure offset and subtitle settings (optional)
53
+ * Step3: Click the **<font color="#f7802b">Clip</font>** button or **<font color="#f7802b">Clip and Add Subtitles</font>** button to get the result
54
+
55
+ 🔥 FunClip now integrates the smart editing functionality with large language models, choose an LLM model for a try~
56
+ """)
57
+
58
+ top_md_2_en = ("Access to the Modelscope creation space may be unstable due to simultaneous use by multiple users. It is recommended to deploy the code through the GitHub project yourself.")
59
+
60
+ top_md_3_en = ("""Visiting the FunASR project and paper can help you gain an in-depth understanding of the speech processing models used in ParaClipper:
61
+ <div align="center">
62
+ <div style="display:flex; gap: 0.25rem;" align="center">
63
+ FunASR: <a href='https://github.com/alibaba-damo-academy/FunASR'><img src='https://img.shields.io/badge/Github-Code-blue'></a>
64
+ FunASR Paper: <a href="https://arxiv.org/abs/2305.11013"><img src="https://img.shields.io/badge/Arxiv-2305.11013-orange"></a>
65
+ 🌟Star FunASR: <a href='https://github.com/alibaba-damo-academy/FunASR/stargazers'><img src='https://img.shields.io/github/stars/alibaba-damo-academy/FunASR.svg?style=social'></a>
66
+ </div>
67
+ </div>
68
+ """)
69
+
70
+ top_md_4_en = ("""We provide three ways to use LLMs in the 'LLM Clipping' module:
71
+ 1. Choose Alibaba Cloud's BaiLian platform to call qwen series models via API, for which you need to prepare the BaiLian platform's apikey. Please visit [Alibaba Cloud BaiLian](https://bailian.console.aliyun.com/#/home);
72
+ 2. Choosing models that start with GPT denotes the use of the official OpenAI API, for which you need to provide your own sk and network environment;
73
+ 3. The [gpt4free](https://github.com/xtekky/gpt4free?tab=readme-ov-file) project is also integrated into FunClip, allowing free use of the gpt model;
74
+
75
+ Both methods 1 and 2 require you to enter the respective apikey in the interface.
76
+ Method 3 may be highly unstable, with potential long return times or failure to retrieve results. You may try multiple times or prepare your own sk to use method 1.
77
+ """)