import os

import gradio as gr
from huggingface_hub import Repository
from text_generation import Client



HF_TOKEN = os.environ.get("HF_TOKEN", None)
API_TOKEN = os.environ.get("API_TOKEN", None)
# Fall back to the hosted Guanaco 33B endpoint when API_URL is not set in the environment.
API_URL = os.environ.get(
    "API_URL", "https://api-inference.huggingface.co/models/timdettmers/guanaco-33b-merged"
)

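# Streaming client for the hosted text-generation-inference endpoint.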
client = Client(
    API_URL,
    headers={"Authorization": f"Bearer {API_TOKEN}"},
)

repo = None


def get_total_inputs(inputs, chatbot, preprompt, user_name, assistant_name, sep):
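    """Build the full model prompt from the pre-prompt, prior chat turns, and the new user input."""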
    past = []
    for data in chatbot:
        user_data, model_data = data

        if not user_data.startswith(user_name):
            user_data = user_name + user_data
        if not model_data.startswith(sep + assistant_name):
            model_data = sep + assistant_name + model_data

        past.append(user_data + model_data.rstrip() + sep)

    if not inputs.startswith(user_name):
        inputs = user_name + inputs

    total_inputs = preprompt + "".join(past) + inputs + sep + assistant_name.rstrip()

    return total_inputs


def has_no_history(chatbot, history):
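    """Return True when neither the chatbot display nor the stored history contains any turns."""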
    return not chatbot and not history


header = """My name is Karthik Raja, and I live in Chennai, India. I recently completed my bachelor's degree at SSN College of Engineering. I am an experienced programmer and have honed my skills in competitive programming and machine learning. Through my work in these areas, I have developed a strong foundation in data analysis and model selection, which has allowed me to achieve high accuracy in my projects. My expertise extends to computer vision and natural language processing, and I am particularly interested in exploring cutting-edge techniques like few-shot learning and other meta-learning methods to enhance NLP applications. I have taken part in several ML competitions, including ImageCLEF and HASOC, and have consistently ranked highly. I have also been exploring multilingual model analysis, leveraging the power of few-shot learning to develop highly efficient and accurate models. Overall, my expertise in programming, machine learning, and NLP, combined with my passion for exploring cutting-edge techniques such as few-shot learning, makes me a valuable asset to any team.
I completed my bachelor's degree in Computer Science and Engineering at SSN College of Engineering, Chennai, India, between 2019 and 2023 with a consolidated CGPA of 8.9. This is my highest degree of qualification.
I did my industry internship at Citi Corp, India as a website developer between May 2022 and Aug 2022.
During this internship I collaborated with a four-member team to develop a full-fledged website using Spring Tools, with data extraction from an H2 database.
I also have a strong research profile: I have published three papers in conferences, and one more is under review in a journal.
My first publication was on a neural network for TB analysis, created for the ImageCLEF contest and published in the CEUR-WS proceedings in 2021.
The second, "Abusive and Threatening Language Detection in Native Urdu Script Tweets Exploring Four Conventional Machine Learning Techniques and MLP", was presented at the FIRE conference, where we used Naive Bayes, LSTM, and BERT with different tokenization methods and translation.
The third, titled "Offensive Text Prediction using Machine Learning and Deep Learning Approaches", appeared in the CEUR-WS proceedings, where we explored bagging-like techniques with the models mentioned above.
My final-year project, on counterfactual detection, has been submitted to the journal Neural Processing Letters and is under review.
Apart from papers, I contributed to building an application for the National Institute of Siddha – Ministry of AYUSH (GoI) and AIIMS Jodhpur, the Siddha Expert System, between Sep and Nov 2022: I analyzed Siddha prognosis transcripts written in the Tamil regional language and built an expert system to perform a nine-way classification of Siddha diseases.
I also worked with the Tamil Nadu State Police on a suspicious vehicle tracking system across multiple cameras between Feb 2022 and July 2022.
There we analyzed various deep learning models for feature extraction, used techniques like key-frame extraction, and explored matching models such as Siamese networks and metric measures like cosine distance for vehicle re-identification.
We used prebuilt Kalman filter and DeepSORT models to increase precision and avoid occlusion, and experimented with various object detection, localization, and tracking models.
In another research endeavor, we developed an arm prototype for an underwater vehicle for the Underwater Remote Operated Vehicle Lab at my undergraduate college.
For this I helped design a grabber arm using CAD, trained YOLO models for object detection, and worked on the design and movement of the arm.
Some of my other projects include:
Non-residential built-up area classification from medium-resolution satellite imagery for the India Meteorological Department (IMD), Ministry of Earth Sciences (MoES), Chennai, India, for which we won the Smart India Hackathon.
Person re-identification in a large-scale system at my undergraduate college.
I have also contributed to open source, have regularly taken part in Hacktoberfest, and have contributed to popular libraries like Ivy Unify; for more info check out https://github.com/kitrak-rev.
Connect with me on https://www.linkedin.com/in/kitrak-rev/ or https://github.com/kitrak-rev. These are my profile links.
In my college I held the following positions:
• IEEE CS Student Chapter Core Committee Member (Vice Chair)
• IEEE Student Chapter Core Committee Member (Treasurer)
• ACM Student Chapter Core Committee Member (Event Deputy Head)
• Computer Society of India Student Chapter Core Committee Member (Vice Chair)
• SSN Coding Club Committee Member (Competitive Programming Team)
I was asked to explain the BART model and its use in DALL-E-like models at the IVA pre-conference workshop 2023.
My full name is Karthik Raja Anandan.
Assume you are the Karthik Raja Anandan described in the text above and, keeping this in mind, give polite answers to the following questions in first person. """
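# Template for a single "### Human / ### Assistant" turn; generate() prepends the persona header once.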
prompt_template = "### Human: {query}\n### Assistant:{response}"

def generate(
    user_message,
    chatbot,
    history,
    temperature,
    top_p,
    max_new_tokens,
    repetition_penalty,
):
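    """Stream a reply to `user_message` from the model, conditioned on prior chat turns.

    Yields (chat pairs, flat history, last user message, cleared textbox) after each new
    token so the Gradio UI updates incrementally.
    """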
    # Don't generate a reply when the input is empty
    if not user_message:
        print("Empty input")
        yield chatbot, history, user_message, ""
        return

    history.append(user_message)

    past_messages = []
    for data in chatbot:
        user_data, model_data = data

        past_messages.extend(
            [{"role": "user", "content": user_data}, {"role": "assistant", "content": model_data.rstrip()}]
        )

    if len(past_messages) < 1:
        prompt = header + prompt_template.format(query=user_message, response="")
    else:
        prompt = header
        for i in range(0, len(past_messages), 2):
            intermediate_prompt = prompt_template.format(query=past_messages[i]["content"], response=past_messages[i+1]["content"])
            print("intermediate: ", intermediate_prompt)
            prompt = prompt + '\n' + intermediate_prompt

        prompt = prompt + prompt_template.format(query=user_message, response="")


    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        truncate=999,
        seed=42,
    )

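    # Stream tokens from the endpoint and surface partial output as it arrives.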
    stream = client.generate_stream(
        prompt,
        **generate_kwargs,
    )

    output = ""
    chat = []
    for idx, response in enumerate(stream):
        if response.token.text == '':
            break

        if response.token.special:
            continue
        output += response.token.text
        if idx == 0:
            history.append(" " + output)
        else:
            history[-1] = output

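        # Re-pair the flat history into (user, assistant) tuples for the Chatbot component.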
        chat = [(history[i].strip(), history[i + 1].strip()) for i in range(0, len(history) - 1, 2)]

        yield chat, history, user_message, ""

    return chat, history, user_message, ""


examples = [
    "Give a short summary about you"
]


def clear_chat():
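    """Reset both the visible chat and the stored history."""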
    return [], []


def process_example(args):
    # Drive the streaming generator to completion using the UI's default sampling settings;
    # only invoked when Gradio caches example outputs (cache_examples=False below).
    chat, history = [], []
    for chat, history, *_ in generate(args, [], [], 0.7, 0.9, 1024, 1.2):
        pass
    return [chat, history]


title = """<h1 align="center">Karthik Raja AI Clone 🙋‍♂️ </h1>"""
custom_css = """
#banner-image {
    display: block;
    margin-left: auto;
    margin-right: auto;
}
#chat-message {
    font-size: 14px;
    min-height: 300px;
}
"""

with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
    gr.HTML(title)

    with gr.Row():
        with gr.Column():
            gr.Markdown(
                """
            💻 This demo attempts to be an AI clone of a person, built by prompting the Guanaco 33B model released together with the paper [QLoRA](https://arxiv.org/abs/2305.14314).
            <br />
            Note: The information given by the AI clone may not be 100% accurate; check with the bot's owner to confirm.
            """
            )

    with gr.Row():
        with gr.Box():
            output = gr.Markdown("Ask any question you would like to ask Karthik Raja")
            chatbot = gr.Chatbot(elem_id="chat-message", label="AI-clone of Karthik Raja")

    with gr.Row():
        with gr.Column(scale=3):
            user_message = gr.Textbox(placeholder="Enter your message here", show_label=False, elem_id="q-input")
            with gr.Row():
                send_button = gr.Button("Send", elem_id="send-btn", visible=True)

                clear_chat_button = gr.Button("Clear chat", elem_id="clear-btn", visible=True)

            with gr.Accordion(label="Parameters", open=False, elem_id="parameters-accordion"):
                temperature = gr.Slider(
                    label="Temperature",
                    value=0.7,
                    minimum=0.0,
                    maximum=1.0,
                    step=0.1,
                    interactive=True,
                    info="Higher values produce more diverse outputs",
                )
                top_p = gr.Slider(
                    label="Top-p (nucleus sampling)",
                    value=0.9,
                    minimum=0.0,
                    maximum=1,
                    step=0.05,
                    interactive=True,
                    info="Higher values sample more low-probability tokens",
                )
                max_new_tokens = gr.Slider(
                    label="Max new tokens",
                    value=1024,
                    minimum=0,
                    maximum=2048,
                    step=4,
                    interactive=True,
                    info="The maximum number of new tokens to generate",
                )
                repetition_penalty = gr.Slider(
                    label="Repetition Penalty",
                    value=1.2,
                    minimum=0.0,
                    maximum=10,
                    step=0.1,
                    interactive=True,
                    info="The parameter for repetition penalty. 1.0 means no penalty.",
                )
            with gr.Row():
                gr.Examples(
                    examples=examples,
                    inputs=[user_message],
                    cache_examples=False,
                    fn=process_example,
                    outputs=[output],
                )

            with gr.Row():
                gr.Markdown(
                    "Disclaimer: The model can produce factually incorrect output, and should not be relied on to produce "
                    "factually accurate information. The model was trained on various public datasets; while great efforts "
                    "have been taken to clean the pretraining data, it is possible that this model could generate lewd, "
                    "biased, or otherwise offensive outputs.",
                    elem_classes=["disclaimer"],
                )


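    # Per-session state: a flat list of alternating user/assistant messages, plus the last submitted message.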
    history = gr.State([])
    last_user_message = gr.State("")

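    # Pressing Enter and clicking Send both stream responses through the same generate() handler.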
    user_message.submit(
        generate,
        inputs=[
            user_message,
            chatbot,
            history,
            temperature,
            top_p,
            max_new_tokens,
            repetition_penalty,
        ],
        outputs=[chatbot, history, last_user_message, user_message],
    )

    send_button.click(
        generate,
        inputs=[
            user_message,
            chatbot,
            history,
            temperature,
            top_p,
            max_new_tokens,
            repetition_penalty,
        ],
        outputs=[chatbot, history, last_user_message, user_message],
    )

    clear_chat_button.click(clear_chat, outputs=[chatbot, history])

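# Queue requests so multiple users can stream generations concurrently.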
demo.queue(concurrency_count=16).launch()