{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": { "id": "0paOn0yhDB63" }, "outputs": [], "source": [ "from hybrid_pipe import HybridQAPipeline\n", "from transformers import pipeline\n", "from transformers.pipelines import PIPELINE_REGISTRY\n", "\n", "from transformers import AutoModelForQuestionAnswering, TFAutoModelForQuestionAnswering\n" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "id": "DuwOF8yjDB66" }, "outputs": [], "source": [ "# Register new pipe\n", "PIPELINE_REGISTRY.register_pipeline(\n", " \"hybrid-qa\",\n", " pipeline_class=HybridQAPipeline,\n", " pt_model=AutoModelForQuestionAnswering,\n", " tf_model=TFAutoModelForQuestionAnswering\n", ")" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "id": "pf_tBYQsDB67", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "2d75ec1b-a844-441b-ca84-7859dd8eedc5" }, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ "You are using the default legacy behaviour of the . This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565\n" ] } ], "source": [ "# Create pipe instance\n", "# Note: the model specified here does not matter, we just need to\n", "# pass something valid to satisfy the pipeline class=\n", "hybrid_pipe = pipeline(\"hybrid-qa\", model='datarpit/distilbert-base-uncased-finetuned-natural-questions')" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "KKv6ZS2LDB67", "outputId": "58f78991-1204-4714-af1c-bad70d120118" }, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ "Truncation was not explicitly activated but `max_length` is provided a specific value, please use `truncation=True` to explicitly truncate examples to max length. Defaulting to 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy more precisely by providing a specific strategy to `truncation`.\n", "/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py:1141: UserWarning: Using the model-agnostic default `max_length` (=20) to control the generation length. 
{ "cell_type": "code", "execution_count": 4, "metadata": { "id": "pf_tBYQsDB67", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "2d75ec1b-a844-441b-ca84-7859dd8eedc5" }, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ "You are using the default legacy behaviour of the . This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565\n" ] } ], "source": [ "# Create a pipeline instance\n", "# Note: the model specified here does not matter; we just need to\n", "# pass something valid to satisfy the pipeline class.\n", "hybrid_pipe = pipeline(\"hybrid-qa\", model='datarpit/distilbert-base-uncased-finetuned-natural-questions')" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "KKv6ZS2LDB67", "outputId": "58f78991-1204-4714-af1c-bad70d120118" }, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ "Truncation was not explicitly activated but `max_length` is provided a specific value, please use `truncation=True` to explicitly truncate examples to max length. Defaulting to 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy more precisely by providing a specific strategy to `truncation`.\n", "/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py:1141: UserWarning: Using the model-agnostic default `max_length` (=20) to control the generation length. We recommend setting `max_new_tokens` to control the maximum length of the generation.\n", " warnings.warn(\n" ] }, { "output_type": "execute_result", "data": { "text/plain": [ "{'guess': 'Oslo', 'confidence': 2.0940363768613864e-14}" ] }, "metadata": {}, "execution_count": 5 } ], "source": [ "# Inference testing!\n", "hybrid_pipe(question=\"What is the capital of Norway?\", context=\"The capital of Norway is Oslo\")" ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 53 }, "id": "sgrDgs9-DB68", "outputId": "7fe9f733-f19b-43cb-e68c-33e302b2be43" }, "outputs": [ { "output_type": "execute_result", "data": { "text/plain": [ "CommitInfo(commit_url='https://huggingface.co/justinhl/hybrid-qa/commit/7019d3e4971d6c754e9529b5a3de9a0425c3cccf', commit_message='Upload HybridQAPipeline', commit_description='', oid='7019d3e4971d6c754e9529b5a3de9a0425c3cccf', pr_url=None, pr_revision=None, pr_num=None)" ], "application/vnd.google.colaboratory.intrinsic+json": { "type": "string" } }, "metadata": {}, "execution_count": 6 } ], "source": [ "# Push the pipeline (weights, tokenizer, and custom pipeline code) to the Hub\n", "hybrid_pipe.push_to_hub(\"hybrid-qa\")" ] },
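{ "cell_type": "markdown", "metadata": {}, "source": [ "`push_to_hub` uploads the model weights and tokenizer together with the file defining the custom pipeline, which is why the repository can later be loaded on another machine with `trust_remote_code=True`. For a purely local copy, `save_pretrained` works the same way; the directory name below is just an example." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Local alternative to push_to_hub (example directory name)\n", "hybrid_pipe.save_pretrained(\"hybrid-qa-local\")\n" ] },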
{ "cell_type": "code", "execution_count": 7, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "PPOf6vUhDB68", "outputId": "1fa601e1-dfc7-4128-c430-44652916aa87" }, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ "Some weights of the model checkpoint at justinhl/hybrid-qa were not used when initializing DistilBertForQuestionAnswering: ['model_extractive.distilbert.embeddings.LayerNorm.bias', 'model_extractive.distilbert.embeddings.LayerNorm.weight', ... several hundred more 'model_extractive.*' and 'model_generative.*' parameter names trimmed ..., 'model_generative.lm_head.weight', 'model_generative.shared.weight']\n", "- This IS expected if you are initializing DistilBertForQuestionAnswering from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", "- This IS NOT expected if you are initializing DistilBertForQuestionAnswering from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", "Some weights of DistilBertForQuestionAnswering were not initialized from the model checkpoint at justinhl/hybrid-qa and are newly initialized: ['embeddings.LayerNorm.bias', 'embeddings.LayerNorm.weight', ... the remaining 'transformer.layer.*' parameter names trimmed ..., 'transformer.layer.5.sa_layer_norm.weight']\n", "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" ] } ], "source": [ "# Import the pipeline from the Hub; trust_remote_code=True lets the custom\n", "# pipeline code uploaded above run locally\n", "imported_pipe = pipeline(\"hybrid-qa\", model=\"justinhl/hybrid-qa\", trust_remote_code=True)" ] },
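{ "cell_type": "markdown", "metadata": {}, "source": [ "The \"weights were not used / newly initialized\" warnings above are expected here: the checkpoint stores the composite model's parameters under `model_extractive.*` and `model_generative.*` prefixes, which the generic `DistilBertForQuestionAnswering` loader cannot match. The custom pipeline class still ends up with the full hybrid model, as the `Model loaded:` printout at the end of the notebook shows; the quick check below is an illustrative addition." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# The custom pipeline code, not the generic loader, determines the final model class\n", "print(type(imported_pipe.model).__name__)  # expected: HybridQAModel\n" ] },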
{ "cell_type": "code", "source": [ "# Inference testing!\n", "imported_pipe(question=\"What is the capital of Norway?\", context=\"The capital of Norway is Oslo\")" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "sQsoT-UpPp0O", "outputId": "dd922309-bd21-4684-caee-c4d4499bf69b" }, "execution_count": 8, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ "Truncation was not explicitly activated but `max_length` is provided a specific value, please use `truncation=True` to explicitly truncate examples to max length. Defaulting to 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy more precisely by providing a specific strategy to `truncation`.\n", "/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py:1141: UserWarning: Using the model-agnostic default `max_length` (=20) to control the generation length. We recommend setting `max_new_tokens` to control the maximum length of the generation.\n", " warnings.warn(\n" ] }, { "output_type": "execute_result", "data": { "text/plain": [ "{'guess': 'Oslo', 'confidence': 2.0940363768613864e-14}" ] }, "metadata": {}, "execution_count": 8 } ] },
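{ "cell_type": "markdown", "metadata": {}, "source": [ "As a further illustrative check, the Hub-imported pipeline should reproduce the local pipeline's output exactly; both inference cells above report the same confidence value." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Illustrative consistency check: local and Hub-imported pipelines should agree\n", "q, c = \"What is the capital of Norway?\", \"The capital of Norway is Oslo\"\n", "print(hybrid_pipe(question=q, context=c) == imported_pipe(question=q, context=c))  # expected: True\n" ] },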
{ "cell_type": "code", "source": [ "print(\"Model loaded:\", imported_pipe.model)" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "GEmtld6OVT7W", "outputId": "93217a25-668e-4a46-8fc9-9db440693a1c" }, "execution_count": 9, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Model loaded: HybridQAModel(\n", " (model_extractive): DistilBertForQuestionAnswering(\n", " (distilbert): DistilBertModel(\n", " (embeddings): Embeddings(\n", " (word_embeddings): Embedding(30522, 768, padding_idx=0)\n", " (position_embeddings): Embedding(512, 768)\n", " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", " (dropout): Dropout(p=0.1, inplace=False)\n", " )\n", " (transformer): Transformer(\n", " (layer): ModuleList(\n", " (0-5): 6 x TransformerBlock(\n", " (attention): MultiHeadSelfAttention(\n", " (dropout): Dropout(p=0.1, inplace=False)\n", " (q_lin): Linear(in_features=768, out_features=768, bias=True)\n", " (k_lin): Linear(in_features=768, out_features=768, bias=True)\n", " (v_lin): Linear(in_features=768, out_features=768, bias=True)\n", " (out_lin): Linear(in_features=768, out_features=768, bias=True)\n", " )\n", " (sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", " (ffn): FFN(\n", " (dropout): Dropout(p=0.1, inplace=False)\n", " (lin1): Linear(in_features=768, out_features=3072, bias=True)\n", " (lin2): Linear(in_features=3072, out_features=768, bias=True)\n", " (activation): GELUActivation()\n", " )\n", " (output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", " )\n", " )\n", " )\n", " )\n", " (qa_outputs): Linear(in_features=768, out_features=2, bias=True)\n", " (dropout): Dropout(p=0.1, inplace=False)\n", " )\n", " (model_generative): T5ForConditionalGeneration(\n", " (shared): Embedding(32128, 768)\n", " (encoder): T5Stack(\n", " (embed_tokens): Embedding(32128, 768)\n", " (block): ModuleList(\n", " (0): T5Block(\n", " (layer): ModuleList(\n", " (0): T5LayerSelfAttention(\n", " (SelfAttention): T5Attention(\n", " (q): Linear(in_features=768, out_features=768, bias=False)\n", " (k): Linear(in_features=768, out_features=768, bias=False)\n", " (v): Linear(in_features=768, out_features=768, bias=False)\n", " (o): Linear(in_features=768, out_features=768, bias=False)\n", " (relative_attention_bias): Embedding(32, 12)\n", " )\n", " (layer_norm): T5LayerNorm()\n", " (dropout): Dropout(p=0.1, inplace=False)\n", " )\n", " (1): T5LayerFF(\n", " (DenseReluDense): T5DenseActDense(\n", " (wi): Linear(in_features=768, out_features=3072, bias=False)\n", " (wo): Linear(in_features=3072, out_features=768, bias=False)\n", " (dropout): Dropout(p=0.1, inplace=False)\n", " (act): ReLU()\n", " )\n", " (layer_norm): T5LayerNorm()\n", "
(dropout): Dropout(p=0.1, inplace=False)\n", " )\n", " )\n", " )\n", " (1-11): 11 x T5Block(\n", " (layer): ModuleList(\n", " (0): T5LayerSelfAttention(\n", " (SelfAttention): T5Attention(\n", " (q): Linear(in_features=768, out_features=768, bias=False)\n", " (k): Linear(in_features=768, out_features=768, bias=False)\n", " (v): Linear(in_features=768, out_features=768, bias=False)\n", " (o): Linear(in_features=768, out_features=768, bias=False)\n", " )\n", " (layer_norm): T5LayerNorm()\n", " (dropout): Dropout(p=0.1, inplace=False)\n", " )\n", " (1): T5LayerFF(\n", " (DenseReluDense): T5DenseActDense(\n", " (wi): Linear(in_features=768, out_features=3072, bias=False)\n", " (wo): Linear(in_features=3072, out_features=768, bias=False)\n", " (dropout): Dropout(p=0.1, inplace=False)\n", " (act): ReLU()\n", " )\n", " (layer_norm): T5LayerNorm()\n", " (dropout): Dropout(p=0.1, inplace=False)\n", " )\n", " )\n", " )\n", " )\n", " (final_layer_norm): T5LayerNorm()\n", " (dropout): Dropout(p=0.1, inplace=False)\n", " )\n", " (decoder): T5Stack(\n", " (embed_tokens): Embedding(32128, 768)\n", " (block): ModuleList(\n", " (0): T5Block(\n", " (layer): ModuleList(\n", " (0): T5LayerSelfAttention(\n", " (SelfAttention): T5Attention(\n", " (q): Linear(in_features=768, out_features=768, bias=False)\n", " (k): Linear(in_features=768, out_features=768, bias=False)\n", " (v): Linear(in_features=768, out_features=768, bias=False)\n", " (o): Linear(in_features=768, out_features=768, bias=False)\n", " (relative_attention_bias): Embedding(32, 12)\n", " )\n", " (layer_norm): T5LayerNorm()\n", " (dropout): Dropout(p=0.1, inplace=False)\n", " )\n", " (1): T5LayerCrossAttention(\n", " (EncDecAttention): T5Attention(\n", " (q): Linear(in_features=768, out_features=768, bias=False)\n", " (k): Linear(in_features=768, out_features=768, bias=False)\n", " (v): Linear(in_features=768, out_features=768, bias=False)\n", " (o): Linear(in_features=768, out_features=768, bias=False)\n", " )\n", " (layer_norm): T5LayerNorm()\n", " (dropout): Dropout(p=0.1, inplace=False)\n", " )\n", " (2): T5LayerFF(\n", " (DenseReluDense): T5DenseActDense(\n", " (wi): Linear(in_features=768, out_features=3072, bias=False)\n", " (wo): Linear(in_features=3072, out_features=768, bias=False)\n", " (dropout): Dropout(p=0.1, inplace=False)\n", " (act): ReLU()\n", " )\n", " (layer_norm): T5LayerNorm()\n", " (dropout): Dropout(p=0.1, inplace=False)\n", " )\n", " )\n", " )\n", " (1-11): 11 x T5Block(\n", " (layer): ModuleList(\n", " (0): T5LayerSelfAttention(\n", " (SelfAttention): T5Attention(\n", " (q): Linear(in_features=768, out_features=768, bias=False)\n", " (k): Linear(in_features=768, out_features=768, bias=False)\n", " (v): Linear(in_features=768, out_features=768, bias=False)\n", " (o): Linear(in_features=768, out_features=768, bias=False)\n", " )\n", " (layer_norm): T5LayerNorm()\n", " (dropout): Dropout(p=0.1, inplace=False)\n", " )\n", " (1): T5LayerCrossAttention(\n", " (EncDecAttention): T5Attention(\n", " (q): Linear(in_features=768, out_features=768, bias=False)\n", " (k): Linear(in_features=768, out_features=768, bias=False)\n", " (v): Linear(in_features=768, out_features=768, bias=False)\n", " (o): Linear(in_features=768, out_features=768, bias=False)\n", " )\n", " (layer_norm): T5LayerNorm()\n", " (dropout): Dropout(p=0.1, inplace=False)\n", " )\n", " (2): T5LayerFF(\n", " (DenseReluDense): T5DenseActDense(\n", " (wi): Linear(in_features=768, out_features=3072, bias=False)\n", " (wo): Linear(in_features=3072, out_features=768, 
bias=False)\n", " (dropout): Dropout(p=0.1, inplace=False)\n", " (act): ReLU()\n", " )\n", " (layer_norm): T5LayerNorm()\n", " (dropout): Dropout(p=0.1, inplace=False)\n", " )\n", " )\n", " )\n", " )\n", " (final_layer_norm): T5LayerNorm()\n", " (dropout): Dropout(p=0.1, inplace=False)\n", " )\n", " (lm_head): Linear(in_features=768, out_features=32128, bias=False)\n", " )\n", ")\n" ] } ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.7" }, "colab": { "provenance": [] } }, "nbformat": 4, "nbformat_minor": 0 }