Anandhavalli2 committed on
Commit
042c583
1 Parent(s): d72bc8c

Initial commit with fine-tuned GPT-2 model and Gradio app

app.py ADDED
@@ -0,0 +1,54 @@
+
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import gradio as gr
+ import torch
+ #from transformers import GPT2LMHeadModel, GPT2Tokenizer
+
+ #import pickle
+
+
+ title = "🤖 Deployment of GUVI GPT Model using Hugging Face"
+ description = "Building open-domain chatbots is a challenging area for machine learning research."
+ examples = [["Guvi Details"]]
+
+ model_name = "fine_tuned_model123"
+ #model = GPT2LMHeadModel.from_pretrained(model_name)
+ #tokenizer = GPT2Tokenizer.from_pretrained(model_name)
+
+ # Load the tokenizer and model (here from the local fine_tuned_model123 directory)
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name)
+
+ def predict(input, history=[]):
+     # tokenize the new user message and terminate it with the EOS token
+     new_user_input_ids = tokenizer.encode(
+         input + tokenizer.eos_token, return_tensors="pt"
+     )
+
+     # append the new user tokens to the chat history carried in the "state" input
+     bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
+
+     # generate a response; GPT-2's context window (n_positions) is 1024 tokens
+     history = model.generate(
+         bot_input_ids, max_length=1024, pad_token_id=tokenizer.eos_token_id
+     ).tolist()
+
+     # decode the tokens and split the conversation on the EOS marker
+     response = tokenizer.decode(history[0]).split("<|endoftext|>")
+     # print('decoded_response-->>'+str(response))
+     response = [
+         (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)
+     ]  # pair consecutive turns as (user, bot) tuples
+     # print('response-->>'+str(response))
+     return response, history
+
+
+ gr.Interface(
+     fn=predict,
+     title=title,
+     description=description,
+     examples=examples,
+     inputs=["text", "state"],
+     outputs=["chatbot", "state"],
+     theme="finlaymacklon/boxy_violet",
+ ).launch()
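A quick way to sanity-check the loading and generation path in app.py before launching the interface is to load the checkpoint directly and generate a single reply. The snippet below is a minimal sketch, not part of this commit; it assumes the fine_tuned_model123 directory added below is in the working directory, and it reuses the "Guvi Details" example prompt from the app.

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("fine_tuned_model123")
model = AutoModelForCausalLM.from_pretrained("fine_tuned_model123")

# mirror predict(): append the EOS token to the user turn, then generate
prompt = "Guvi Details" + tokenizer.eos_token
input_ids = tokenizer.encode(prompt, return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(
        input_ids,
        max_length=200,                       # well inside GPT-2's 1024-token window
        pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no dedicated pad token
    )
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))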
fine_tuned_model123/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "gpt2",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.41.2",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
fine_tuned_model123/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.41.2"
+ }
fine_tuned_model123/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
fine_tuned_model123/special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "bos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
fine_tuned_model123/tokenizer_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "50256": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "model_max_length": 1024,
+   "pad_token": null,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
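Note that tokenizer_config.json leaves pad_token as null: GPT-2 ships without a padding token, which is why app.py passes pad_token_id=tokenizer.eos_token_id to generate(). If padded batches are ever needed (for example, tokenizing several prompts at once), one common workaround is to reuse the EOS token as the pad token. The snippet below is only a hedged sketch of that option, not something this commit configures, and the two prompt strings are placeholders.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("fine_tuned_model123")
tokenizer.pad_token = tokenizer.eos_token  # reuse <|endoftext|> (id 50256) for padding

# placeholder prompts, just to show padded batch tokenization
batch = tokenizer(["Guvi Details", "Guvi Details and courses"], padding=True, return_tensors="pt")
print(batch["input_ids"].shape, batch["attention_mask"].shape)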
fine_tuned_model123/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
requirements.txt ADDED
@@ -0,0 +1,5 @@
+
+ transformers
+ torch
+ streamlit
+ gradio