import os
import re
import shutil

import requests

# Search the Pexels API for videos matching a keyword
def search_pexels(keyword, api_key, orientation='portrait', size='medium', endpoint='videos', num_pages=50):
    # Accept the misspelling 'potrait' from older callers and normalise it
    if orientation == 'potrait':
        orientation = 'portrait'

    if orientation not in ['portrait', 'landscape', 'square']:
        raise ValueError("orientation must be one of {'portrait', 'landscape', 'square'}")

    if size not in ['small', 'medium', 'large']:
        raise ValueError("size must be one of {'small', 'medium', 'large'}")

    base_url = 'https://api.pexels.com/'

    # Pexels expects the raw API key in the Authorization header
    headers = {'Authorization': api_key}

    # num_pages is sent as the Pexels `per_page` parameter (results per request)
    url = f'{base_url}{endpoint}/search?query={keyword}&per_page={num_pages}&orientation={orientation}&size={size}'

    response = requests.get(url, headers=headers)

    # Return the parsed JSON only if the request was successful (status code 200)
    if response.status_code == 200:
        return response.json()

    print(f'Error: {response.status_code}')
    return None
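
# The Pexels search response consumed below is, roughly, a dict of the form
#   {"videos": [{"id": ..., "video_files": [{"id": ..., "width": ...,
#                "height": ..., "link": "https://..."}, ...], ...}, ...], ...}
# Only the fields actually read by download_video are shown here.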


# Video download function: save the first clip that matches the requested resolution
def download_video(data, parent_path, height, width, links, i):
    for x in data['videos']:
        # Skip videos that were already downloaded for a previous sentence
        if x['id'] in links:
            continue

        for v in x['video_files']:
            if v['height'] == height and v['width'] == width:
                file_path = os.path.join(parent_path, f"{i}_{v['id']}.mp4")
                with open(file_path, 'wb') as f:
                    f.write(requests.get(v['link']).content)
                print("Successfully saved video in", file_path)
                return x['id']

# Use the LLM chains to turn the product description into sentences, derive a
# search keyword per sentence, and download one relevant video for each
def generate_videos(product, api_key, orientation, height, width, llm_chain=None, sum_llm_chain=None):
    prod = product.strip().replace(" ", "_")
    links = []
    sentences = []
    try:
        # Ask the LLM for a description and split it on its numbered-list
        # markers ("1.", "2.", ...) to get individual sentences
        sentences = llm_chain.run(product.strip())
        print('Sentences:', sentences)

        sentences = [x.strip() for x in re.split(r'\d+\.', sentences) if len(x) > 6]

        # (Re)create a directory named after the product for the downloads
        if os.path.exists(prod):
            shutil.rmtree(prod)
        os.mkdir(prod)

        # Download one video per sentence
        print("Keyword:")
        for i, s in enumerate(sentences):
            # Summarise the sentence into a short search keyword
            keyword = sum_llm_chain.run(s)
            print(i + 1, ":", keyword)
            data = search_pexels(keyword, api_key, orientation.lower())
            # link is None when no clip matched the requested resolution
            link = download_video(data, prod, height, width, links, i)
            links.append(link)

        print("Success! Videos have been generated")
    except Exception as e:
        print("Error! Failed generating videos")
        print(e)

    return prod, sentences
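

# Minimal usage sketch for the two Pexels helpers on their own, without the
# LLM chains. It assumes a valid Pexels API key is exported as the
# PEXELS_API_KEY environment variable and that at least one search result
# offers a 1280x720 rendition; adjust the resolution to your needs.
if __name__ == "__main__":
    pexels_key = os.environ.get("PEXELS_API_KEY", "")
    if pexels_key:
        os.makedirs("demo_clips", exist_ok=True)
        results = search_pexels("coffee", pexels_key, orientation="landscape")
        if results:
            saved_id = download_video(results, "demo_clips", 720, 1280, links=[], i=0)
            print("Downloaded video id:", saved_id)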