File size: 1,484 Bytes
65068f7
bee55aa
a875418
 
bee55aa
a875418
bee55aa
b7de61a
a875418
bee55aa
a875418
 
 
 
bee55aa
a875418
 
 
 
 
 
 
 
 
 
 
 
 
 
bee55aa
a875418
 
 
 
 
 
 
cfbc0a0
bee55aa
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
import logging
import os
import shutil
import subprocess
import sys

import streamlit as st
from git import Repo
from langchain import HuggingFaceHub, LLMChain, PromptTemplate

# Set page configuration: full-width layout, sidebar state decided by Streamlit.
st.set_page_config(layout="wide", initial_sidebar_state="auto")

# Collect user inputs.
repository_url = st.text_input("Enter GitHub repository URL:", "")
# type="password" masks the token in the browser instead of echoing it in plaintext.
access_token = st.text_input("Enter GitHub access token (optional):", "", type="password")
debug_logging = st.checkbox("Enable debug logging")

# Run the full pipeline when the user clicks "Run":
# clone the repository, then ask the LLM what its main purpose is.
if st.button("Run"):
    if debug_logging:
        logging.basicConfig(filename='log.txt', level=logging.DEBUG, format='%(asctime)s %(message)s')
        logging.debug('Starting the process')

    # Clone the repository. Repo.clone_from raises if the target directory
    # already exists, which is guaranteed on any rerun — remove a stale clone first.
    local_path = "/tmp/repository"
    if os.path.exists(local_path):
        shutil.rmtree(local_path)
    # NOTE(review): GIT_SSL_NO_VERIFY disables TLS certificate verification —
    # confirm this is intentional; it exposes the clone to MITM attacks.
    Repo.clone_from(
        repository_url,
        local_path,
        branch="main",  # assumes the default branch is "main" — TODO confirm
        env={"GIT_TERMINAL_PROMPT": "0", "GIT_SSL_NO_VERIFY": "true"},
    )

    # Initialize the Hugging Face model. Only export the token when one was
    # supplied, so an empty string does not clobber a token set in the shell.
    if access_token:
        os.environ['HUGGINGFACEHUB_API_TOKEN'] = access_token
    hub_llm = HuggingFaceHub(repo_id='google/flan-t5-xl', model_kwargs={'temperature': 1e-10})

    # LLMChain requires a PromptTemplate object, not a plain (f-)string;
    # passing a str raises a validation error at construction time.
    template = PromptTemplate(
        input_variables=["repository_url"],
        template="What is the main purpose of the repository at {repository_url}?",
    )
    llm_chain = LLMChain(prompt=template, llm=hub_llm)

    # Run the chain with its one template variable and show the answer.
    answer = llm_chain.run(repository_url=repository_url)
    st.write("Answer:", answer)

    if debug_logging:
        logging.debug('Finished the process')

# Snapshot the installed packages to requirements.txt, then install from that
# snapshot. subprocess.run with an argument list (shell=False) and
# sys.executable targets the interpreter actually running this script and
# avoids shell-injection surface, unlike the former os.system shell strings.
# NOTE(review): this runs on every Streamlit rerun, and installing the
# freshly-frozen snapshot is a no-op — consider removing or guarding this step.
with open("requirements.txt", "w") as req_file:
    subprocess.run([sys.executable, "-m", "pip", "freeze"], stdout=req_file, check=False)
subprocess.run([sys.executable, "-m", "pip", "install", "-r", "requirements.txt"], check=False)