sudhir2016 committed on
Commit 7ef5f63
1 Parent(s): 5cce4db

Update app.py

Files changed (1)
  1. app.py +21 -7
app.py CHANGED
@@ -1,10 +1,24 @@
  import gradio as gr
  import pandas as pd
- import sketch
- month=[['April 22', 207], ['May 22',209], ['June 22', 211], ['July 22', 190], ['August 22', 194], ['Sept 22', 199], ['Oct 22',187], ['Nov 22', 187], ['Dec 22', 205], ['Jan 23', 210], ['Feb 23', 210], ['March 23', 209]]
- df=pd.DataFrame(month,columns= ['Month', 'Max Demand in GW'])
- def answer(question):
-     out=df.sketch.ask(question,call_display=False)
-     return out
- demo = gr.Interface(fn=answer, inputs='text',outputs='text',examples=[['which month has the highest max demand']])
+ from torchvision.io import read_video
+ import torch.nn.functional as F
+ import torch, hiera
+ df=pd.read_csv('/content/Kinetic400.csv')
+ model = hiera.hiera_base_16x224(pretrained=True, checkpoint="mae_k400_ft_k400")
+ def recognize(vid):
+     frames, audio, info = read_video(vid, pts_unit='sec', output_format='THWC')
+     frames = frames.float() / 255  # Convert from byte to float
+     frames = torch.stack([frames[:64], frames[64:128]], dim=0)  # Split into two 64-frame clips
+     frames = frames[:, ::4]  # Sample every 4 frames
+     frames = frames.permute(0, 4, 1, 2, 3).contiguous()  # [clip, T, H, W, C] -> [clip, C, T, H, W]
+     frames = F.interpolate(frames, size=(16, 224, 224), mode="trilinear")
+     # frames now has shape torch.Size([2, 3, 16, 224, 224])
+     frames = frames - torch.tensor([0.45, 0.45, 0.45]).view(1, -1, 1, 1, 1)  # Normalize with Kinetics mean
+     frames = frames / torch.tensor([0.225, 0.225, 0.225]).view(1, -1, 1, 1, 1)  # and std
+     out = model(frames)
+     out = out.mean(0)  # Average the logits of the two clips
+     out1 = out.argmax(dim=-1).item()  # Index of the top-scoring class
+     out2 = df.iloc[out1, 1]  # Look up the class name in the CSV
+     return out2
+ demo = gr.Interface(fn=recognize, inputs=gr.Video(type="file"), outputs='text', examples=[['dog.mp4']])
  demo.launch()
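
Note on the label lookup: df.iloc[out1, 1] assumes that Kinetic400.csv lists the Kinetics-400 class names in its second column, ordered to match the model's output indices. A minimal sketch of that assumption (the path and column names here are hypothetical, not taken from the commit):

import pandas as pd

# Hypothetical label file: one row per class, ordered by class index,
# with columns like ['id', 'label'], e.g. 0,"abseiling" ... 399,"zumba"
df = pd.read_csv('/content/Kinetic400.csv')
class_index = 0                    # would come from out.argmax(dim=-1).item()
print(df.iloc[class_index, 1])     # second column holds the human-readable label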