# pq / app.py
import gradio as gr
import pulp
import pandas as pd
import openpyxl
import requests
import os
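# PaddlePaddle and PaddleOCR are installed at runtime below; this assumes the hosting
# environment allows pip installs from inside the app process.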
os.system('pip install paddlepaddle')
os.system('pip install paddleocr')
from paddleocr import PaddleOCR, draw_ocr
from PIL import Image
import torch
# CSS intended to hide the Gradio footer; note that w_style is defined here but never
# actually applied to the interface below.
w_style = """
<style>
footer {visibility: hidden; }
</style>
"""
# Embedded Power Virtual Agents web chat, returned on demand in the "Power Virtual Agents" tab.
html_code = '''
<iframe src="https://web.powerva.microsoft.com/environments/Default-51a58d6c-4fcf-4b75-8608-d00bf7f244d5/bots/new_bot_830e155fc862429e89683426b31c9bd5/webchat" height="500" frameborder="1" style="width:100%"></iframe>
'''
# Embedded Excel for the web workbook, shown in the "Excel365测试" (Excel 365 test) tab.
ht_text = '''
<iframe width="800" height="800" frameborder="0" scrolling="no" src="https://dyrscomcn-my.sharepoint.com/personal/huangcaiguang_dyrs_com_cn/_layouts/15/Doc.aspx?sourcedoc={6d3da844-5b54-473b-a32a-08659600d015}&action=embedview&AllowTyping=True&ActiveCell='业绩'!A1&wdInConfigurator=True&wdInConfigurator=True&edesNext=false&ejss=false"></iframe>
'''
# Blocks app with custom CSS for the submit button (elem_id="chedan"): hover and press effects.
demo = gr.Blocks(css='''
#chedan {-webkit-transition-duration: 0.4s; transition-duration: 0.4s; border-radius: 40%}
#chedan:hover {background-color: #4CAF50; color: white;}
#chedan:active {background-color: #3e8e41; box-shadow: 0 5px #666; transform: translateY(4px);}
''')
def fx1(x):
    # Simple greeting used by the "测试1" (basic test) tab: "欢迎练习Gradio" means "welcome to practicing Gradio".
    return f"欢迎练习Gradio, {x}!"
def fx2(x, y):
    # Subset-sum via 0/1 integer programming: pick a subset of the numbers in x
    # whose sum equals the target value y.
    m = pulp.LpProblem()
    x = list(map(int, x.split(',')))
    t = [pulp.LpVariable('t' + str(i), cat=pulp.LpBinary) for i in range(len(x))]
    m += pulp.lpDot(x, t)           # objective: minimize the selected sum (fixed at y by the constraint below)
    m += (pulp.lpDot(x, t) == y)    # the selected numbers must sum to y
    m.solve()
    resu = {'data': [x[i] for i in range(len(t)) if pulp.value(t[i])]}
    return resu
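# A hedged sketch of how fx2 behaves (illustrative values, not from the app):
#   fx2("1,3,5,10,16", 19)
# must pick binary indicators t_i with sum(x_i * t_i) == 19, so the result could be
# {'data': [3, 16]} or {'data': [1, 3, 5, 10]}; which feasible subset is returned
# depends on the backend solver PuLP uses.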
def fx3(x):
    # Read the uploaded Excel file into a DataFrame (first row treated as the header).
    df = pd.read_excel(x, header=0, engine='openpyxl')
    return df
def fx4(x):
    # Build a Baidu TTS URL for the input text and return it as a markdown link
    # ("点击音频文件" = "click the audio file"). The text is URL-encoded so that
    # non-ASCII characters survive in the query string.
    url_s = "https://tts.baidu.com/text2audio?tex=" + requests.utils.quote(x) + "&cuid=baike&lan=ZH&ie=utf-8&ctp=1&pdt=301&vol=9&rate=32&per=0"
    return "点击音频文件 [voice](" + url_s + ")"
def fx5():
    # Return the Power Virtual Agents iframe HTML defined above.
    return html_code
def fx6(img, lang):
    # Run PaddleOCR on the uploaded image in the selected language (CPU only),
    # then draw the detected boxes and text onto the image and return the result file.
    ocr = PaddleOCR(use_angle_cls=True,
                    lang={'中文': 'ch', '英文': 'en', '法文': 'fr', '德文': 'german', '韩文': 'korean', '日文': 'japan'}[lang],
                    use_gpu=False)
    img_path = img.name
    result = ocr.ocr(img_path, cls=True)
    # result is assumed to be a flat list of [box, (text, score)] entries, as returned
    # by older PaddleOCR releases; newer releases nest this one level deeper per page.
    image = Image.open(img_path).convert('RGB')
    boxes = [line[0] for line in result]
    txts = [line[1][0] for line in result]
    scores = [line[1][1] for line in result]
    im_show = draw_ocr(image, boxes, txts, scores, font_path='simfang.ttf')
    im_show = Image.fromarray(im_show)
    im_show.save('result.jpg')
    return 'result.jpg'
# Load the AnimeGANv2 generator and the face2paint helper from torch.hub.
# Use the GPU when one is available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
model2 = torch.hub.load(
    "AK391/animegan2-pytorch:main",
    "generator",
    pretrained=True,
    device=device,
    progress=False
)
face2paint = torch.hub.load(
    'AK391/animegan2-pytorch:main', 'face2paint',
    size=512, device=device, side_by_side=False
)
def fx7(img):
    # Apply the AnimeGANv2 style transfer to the input PIL image.
    out = face2paint(model2, img)
    return out
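# Hedged local usage sketch for the style-transfer path (hypothetical file names,
# not part of the app itself):
#   from PIL import Image
#   styled = fx7(Image.open("portrait.jpg").convert("RGB"))
#   styled.save("portrait_anime.jpg")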
with demo:
    # Page header: "WEB APP测试应用" (WEB APP test application) plus a banner image.
    gr.Markdown(
        """
# <center> WEB APP测试应用!
<div align=center><img src="https://pbihub.cn/uploads/images/201809/23/44/n6xk1x6UnN.gif" width="405" height="405" /></div>
""")
    with gr.Tabs():
        with gr.TabItem("测试1"):
            # Basic test: echo a greeting for the entered text.
            text_input = gr.Textbox(placeholder='请输入测试字符串,如"畅心"', label="请输入测试内容", show_label=True)
            text_output = gr.Textbox(label="输出结果", show_label=True)
            tj_button = gr.Button("提交>>", elem_id="chedan")
        with gr.TabItem("凑数"):
            # Subset-sum: find numbers from the list that add up to the target value.
            val_input = [gr.Textbox(placeholder='请输入凑数序列,如"1,3,5,10,16,54,32,48,6,7"', label="请输入待凑数序列", show_label=True),
                         gr.Number(value=80, label="请输入凑数和值", show_label=True)]
            json_output = gr.JSON(label="输出运算结果", show_label=True)
            cs_button = gr.Button("开始凑数>>")
        with gr.TabItem("文件交互"):
            # File interaction: read an uploaded Excel file into a table.
            file_input = gr.File(file_count="single", label="请选择需要读取的excel文件", show_label=True)
            table_output = gr.Dataframe(label="输出读取的表格数据", show_label=True)
            dq_button = gr.Button("开始读取>>")
        with gr.TabItem("TTS"):
            # Text-to-speech via the Baidu TTS endpoint.
            TS_input = gr.Textbox(placeholder='请输入测试字符串,如"欢迎测试字符串转语音功能模块"', label="请输入测试内容", show_label=True)
            audio_output = gr.Markdown(label="点击生成音频文件", show_label=True)
            tts_button = gr.Button("开始转换>>")
        with gr.TabItem("Power Virtual Agents"):
            pva_button = gr.Button("调用机器人对话>>")
            pva_output = gr.HTML(label="机器人聊天窗口", show_label=True)
        with gr.TabItem("Excel365测试"):
            gr.HTML(value=ht_text, label="在线版演示", show_label=True)
        with gr.TabItem("OCR识别"):
            ocr_input = [gr.Image(type='file', label='请提供你需要识别的照片'),
                         gr.Dropdown(choices=['中文', '英文', '法文', '德文', '韩文', '日文'], type="value", value='中文', label='请选择需要识别的语言')]
            ocr_output = gr.Image(type='file', label='识别结果')
            ocr_button = gr.Button("开始ocr识别")
        with gr.TabItem("风格迁移"):
            qy_input = gr.Image(type="pil", label="请选择需要风格迁移的照片")
            qy_output = gr.Image(type="pil", label="输出迁移风格结果")
            qy_button = gr.Button("开始调用模型")
    # Wire the buttons to their handlers; "ghqj" and "df" are also exposed as named API endpoints.
    tj_button.click(fx1, inputs=text_input, outputs=text_output)
    cs_button.click(fx2, inputs=val_input, outputs=json_output, api_name="ghqj")
    dq_button.click(fx3, inputs=file_input, outputs=table_output, api_name="df")
    tts_button.click(fx4, inputs=TS_input, outputs=audio_output)
    pva_button.click(fx5, inputs=[], outputs=pva_output)
    ocr_button.click(fx6, inputs=ocr_input, outputs=ocr_output)
    qy_button.click(fx7, inputs=qy_input, outputs=qy_output)
demo.launch(enable_queue=True)
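# Hedged sketch of calling the two named endpoints from another process once the app
# is running (the exact route and payload layout can vary between Gradio versions, so
# treat this as an assumption rather than a guaranteed contract):
#   import requests
#   r = requests.post("http://127.0.0.1:7860/run/ghqj",
#                     json={"data": ["1,3,5,10,16,54,32,48,6,7", 80]})
#   print(r.json())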