{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"gpuType": "T4"
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "nSYihpXyWYuv"
},
"outputs": [],
"source": [
"import pandas as pd\n",
"import torch\n",
"!pip install transformers"
]
},
{
"cell_type": "code",
"source": [
"!pip install -q gradio\n",
"import gradio as gr\n",
"from transformers import AutoModelForCausalLM, TrainingArguments, Trainer, pipeline, AutoTokenizer\n",
"model = AutoModelForCausalLM.from_pretrained('myn11/gpt2_hdl').to('cuda')\n",
"tokenizer = AutoTokenizer.from_pretrained('myn11/gpt2_hdl')"
],
"metadata": {
"id": "--rUMbt6Wii1"
},
"execution_count": 4,
"outputs": []
},
{
"cell_type": "code",
"source": [
"examples = [\n",
" ['summarise the code library IEEE;use IEEE.std_logic_1164.all;entity Top is port (dips_i : in std_logic_vector(3 downto 0);pbs_i : in std_logic;leds_o : out std_logic_vector(3 downto 0));end entity Top;architecture RTL of Top is begin leds_o <= dips_i when pbs_i=\"0\" else (others => \"1\"); end architecture RTL;'],\n",
" ['Explain the code library ieee;use ieee.std_logic_1164.all;entity test is port(clk : in std_logic;d : in std_logic ;q :out std_logic );end test;architecture rtl of test is begin process (clk) begin if rising_edge(clk) then q <= d; end if; end; end rtl;'],\n",
" ['Write a VHDL code for a read-only memory (ROM) module that takes an external data initialization file, and is parameterized by the data width, address width, and file name. The module should have an address input and a data output. It should read data from the file and provide the corresponding data output based on the given address.']\n",
"]\n",
"\n",
"title = \"GPT2 fine tuned for HDL\"\n",
"\n",
"def process_input(text):\n",
" input = tokenizer(text, truncation=True, return_tensors='pt').input_ids.to('cuda')\n",
" output = model.generate(input, max_length=1000, do_sample=True, top_k=50, top_p=0.92)\n",
" return tokenizer.batch_decode(output, skip_special_tokens=True)\n",
"\n",
"model_gui = gr.Interface(\n",
" process_input,\n",
" gr.Textbox(lines=3,label=\"Input\"),\n",
" gr.Textbox(lines=3, label=\"GPT2_HDL\"),\n",
" title=title,\n",
" examples=examples\n",
")\n",
"model_gui.launch(share=True)"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 611
},
"id": "adXT9D8jWiY3",
"outputId": "dad5cadf-1a6e-427f-d14f-d49151846bf0"
},
"execution_count": 5,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Colab notebook detected. To show errors in colab notebook, set debug=True in launch()\n",
"Running on public URL: https://90957029bf8cd47c54.gradio.live\n",
"\n",
"This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)\n"
]
},
{
"output_type": "display_data",
"data": {
"text/plain": [
"<IPython.core.display.HTML object>"
],
"text/html": [
"<div><iframe src=\"https://90957029bf8cd47c54.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
]
},
"metadata": {}
},
{
"output_type": "execute_result",
"data": {
"text/plain": []
},
"metadata": {},
"execution_count": 5
}
]
},
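{
"cell_type": "code",
"source": [
"# Minimal sketch (added for illustration; not part of the original run): query the\n",
"# model directly without the Gradio UI, reusing the sampling settings from\n",
"# process_input above. The prompt string here is a hypothetical example.\n",
"prompt = 'Write a VHDL code for a 2-to-1 multiplexer with inputs a, b, sel and output y.'\n",
"ids = tokenizer(prompt, truncation=True, return_tensors='pt').input_ids.to('cuda')\n",
"out = model.generate(ids, max_length=1000, do_sample=True, top_k=50, top_p=0.92, pad_token_id=tokenizer.eos_token_id)\n",
"print(tokenizer.batch_decode(out, skip_special_tokens=True)[0])"
],
"metadata": {},
"execution_count": null,
"outputs": []
},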
{
"cell_type": "code",
"source": [],
"metadata": {
"id": "MAqjgbJCWiMH"
},
"execution_count": null,
"outputs": []
}
]
}