Darshan committed on
Commit bae6852
1 Parent(s): 023a520

permissions issue fix

Files changed (4)
  1. Dockerfile +5 -3
  2. app.py +71 -39
  3. app/main.py +80 -0
  4. requirements.txt +6 -6
Dockerfile CHANGED
@@ -2,13 +2,15 @@
 FROM python:3.10.9
 
 # Copy the current directory contents into the container at .
-COPY . .
+COPY ./app ./app
 
 # Set the working directory to /
-WORKDIR /
+WORKDIR /trans
+
+EXPOSE 7860
 
 # Install requirements.txt
 RUN pip install --no-cache-dir --upgrade -r /requirements.txt
 
 # Start the FastAPI app on port 7860, the default port expected by Spaces
-CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
+CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860"]
app.py CHANGED
@@ -1,35 +1,17 @@
-from pydantic import BaseModel
-
-from .ConfigEnv import config
+from fastapi import FastAPI, HTTPException
+from typing import List
+import torch
+from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
+from IndicTransToolkit import IndicProcessor
 from fastapi.middleware.cors import CORSMiddleware
 
-from langchain.llms import Clarifai
-from langchain.chains import LLMChain
-from langchain.prompts import PromptTemplate
-from TextGen import app
-
-
-class Generate(BaseModel):
-    text: str
-
-
-def generate_text(prompt: str):
-    if prompt == "":
-        return {"detail": "Please provide a prompt."}
-    else:
-        prompt = PromptTemplate(template=prompt, input_variables=["Prompt"])
-        llm = Clarifai(
-            pat=config.CLARIFAI_PAT,
-            user_id=config.USER_ID,
-            app_id=config.APP_ID,
-            model_id=config.MODEL_ID,
-            model_version_id=config.MODEL_VERSION_ID,
-        )
-        llmchain = LLMChain(prompt=prompt, llm=llm)
-        llm_response = llmchain.run({"Prompt": prompt})
-        return Generate(text=llm_response)
+import os
 
+os.environ["HF_HOME"] = "/.cache"
+# Initialize FastAPI
+app = FastAPI()
 
+# Add CORS middleware
 app.add_middleware(
     CORSMiddleware,
     allow_origins=["*"],
@@ -38,17 +20,67 @@ app.add_middleware(
     allow_headers=["*"],
 )
 
+# Initialize models and processors
+model = AutoModelForSeq2SeqLM.from_pretrained(
+    "ai4bharat/indictrans2-en-indic-1B", trust_remote_code=True
+)
+tokenizer = AutoTokenizer.from_pretrained(
+    "ai4bharat/indictrans2-en-indic-1B", trust_remote_code=True
+)
+ip = IndicProcessor(inference=True)
+DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+model = model.to(DEVICE)
+
 
-@app.get("/", tags=["Home"])
-def api_home():
-    return {"detail": "Welcome to FastAPI TextGen Tutorial!"}
+def translate_text(sentences: List[str], target_lang: str):
+    try:
+        src_lang = "eng_Latn"
+        batch = ip.preprocess_batch(sentences, src_lang=src_lang, tgt_lang=target_lang)
+        inputs = tokenizer(
+            batch,
+            truncation=True,
+            padding="longest",
+            return_tensors="pt",
+            return_attention_mask=True,
+        ).to(DEVICE)
+
+        with torch.no_grad():
+            generated_tokens = model.generate(
+                **inputs,
+                use_cache=True,
+                min_length=0,
+                max_length=256,
+                num_beams=5,
+                num_return_sequences=1,
+            )
+
+        with tokenizer.as_target_tokenizer():
+            generated_tokens = tokenizer.batch_decode(
+                generated_tokens.detach().cpu().tolist(),
+                skip_special_tokens=True,
+                clean_up_tokenization_spaces=True,
+            )
+
+        translations = ip.postprocess_batch(generated_tokens, lang=target_lang)
+        return {
+            "translations": translations,
+            "source_language": src_lang,
+            "target_language": target_lang,
+        }
+    except Exception as e:
+        raise Exception(f"Translation failed: {str(e)}")
 
 
-@app.post(
-    "/api/generate",
-    summary="Generate text from prompt",
-    tags=["Generate"],
-    response_model=Generate,
-)
-def inference(input_prompt: str):
-    return generate_text(prompt=input_prompt)
+# FastAPI routes
+@app.get("/health")
+async def health_check():
+    return {"status": "healthy"}
+
+
+@app.post("/translate")
+async def translate_endpoint(sentences: List[str], target_lang: str):
+    try:
+        result = translate_text(sentences=sentences, target_lang=target_lang)
+        return result
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
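The os.environ["HF_HOME"] = "/.cache" line is the permissions fix named in the commit message: Spaces containers run as a non-root user whose default Hugging Face cache directory is not writable, so the model download fails with a PermissionError. One caveat worth noting (an observation, not part of the commit): huggingface_hub resolves HF_HOME into its constants when it is imported, so the override is only reliable if it runs before transformers is imported, roughly:

import os

# Point the Hugging Face cache at a writable path *before* transformers /
# huggingface_hub are imported, since they read HF_HOME at import time.
os.environ["HF_HOME"] = "/.cache"

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer  # noqa: E402

Also note that with the Dockerfile's CMD now pointing at app.main:app, this rewritten app.py is no longer the module uvicorn actually serves.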
app/main.py ADDED
@@ -0,0 +1,80 @@
+from fastapi import FastAPI, HTTPException
+from typing import List
+import torch
+from pydantic import BaseModel
+from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
+from IndicTransToolkit import IndicProcessor
+from fastapi.middleware.cors import CORSMiddleware
+
+app = FastAPI()
+
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+model = AutoModelForSeq2SeqLM.from_pretrained(
+    "ai4bharat/indictrans2-en-indic-1B", trust_remote_code=True
+)
+tokenizer = AutoTokenizer.from_pretrained(
+    "ai4bharat/indictrans2-en-indic-1B", trust_remote_code=True
+)
+
+ip = IndicProcessor(inference=True)
+DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+model = model.to(DEVICE)
+
+
+def translate_text(sentences: List[str], target_lang: str):
+    try:
+        src_lang = "eng_Latn"
+        batch = ip.preprocess_batch(sentences, src_lang=src_lang, tgt_lang=target_lang)
+        inputs = tokenizer(
+            batch,
+            truncation=True,
+            padding="longest",
+            return_tensors="pt",
+            return_attention_mask=True,
+        ).to(DEVICE)
+
+        with torch.no_grad():
+            generated_tokens = model.generate(
+                **inputs,
+                use_cache=True,
+                min_length=0,
+                max_length=256,
+                num_beams=5,
+                num_return_sequences=1,
+            )
+
+        with tokenizer.as_target_tokenizer():
+            generated_tokens = tokenizer.batch_decode(
+                generated_tokens.detach().cpu().tolist(),
+                skip_special_tokens=True,
+            )
+
+        return generated_tokens
+    except Exception as e:
+        return str(e)
+
+
+@app.get("/")
+def read_root():
+    return {"Hello": "World"}
+
+
+class TranslateRequest(BaseModel):
+    sentences: List[str]
+    target_lang: str
+
+
+@app.post("/translate/")
+def translate(request: TranslateRequest):
+    try:
+        result = translate_text(request.sentences, request.target_lang)
+        return result
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
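A sketch of a client call against this endpoint (the host, port, and target-language code are assumptions; IndicTrans2 uses FLORES-style codes such as "hin_Deva" for Hindi in Devanagari):

import requests

# Hypothetical local deployment; on Spaces this would be the public Space URL.
payload = {
    "sentences": ["Hello, how are you?", "The weather is nice today."],
    "target_lang": "hin_Deva",
}
resp = requests.post("http://localhost:7860/translate/", json=payload)
resp.raise_for_status()
print(resp.json())  # a list of decoded target-language strings

Two behaviors follow directly from the code above: translate_text catches its own exceptions and returns the message as a plain string, so a failed translation still arrives as HTTP 200 with an error string for a body; and unlike app.py, this version skips ip.postprocess_batch, returning the tokenizer's decoded output directly.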
requirements.txt CHANGED
@@ -1,7 +1,7 @@
-fastapi==0.99.1
+fastapi
 uvicorn
-requests
-pydantic==1.10.12
-langchain
-clarifai
-Pillow
+torch
+transformers
+git+https://github.com/VarunGumma/IndicTransToolkit.git
+python-multipart
+pydantic