KardelRuveyda committed
Commit: 3fd879c
Parent(s): 37cd090

Training in progress epoch 0

README.md CHANGED
@@ -4,18 +4,20 @@ base_model: gpt2
 tags:
 - generated_from_keras_callback
 model-index:
-- name: chatbotSentences-mini
+- name: KardelRuveyda/chatbotSentences-mini
   results: []
 ---
 
 <!-- This model card has been generated automatically according to the information Keras had access to. You should
 probably proofread and complete it, then remove this comment. -->
 
-# chatbotSentences-mini
+# KardelRuveyda/chatbotSentences-mini
 
 This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset.
 It achieves the following results on the evaluation set:
-
+- Train Loss: 10.3115
+- Validation Loss: 10.0022
+- Epoch: 0
 
 ## Model description
 
@@ -39,6 +41,9 @@ The following hyperparameters were used during training:
 
 ### Training results
 
+| Train Loss | Validation Loss | Epoch |
+|:----------:|:---------------:|:-----:|
+| 10.3115    | 10.0022         | 0     |
 
 
 ### Framework versions
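
The `generated_from_keras_callback` tag above means this card was written automatically from Keras training information. A minimal sketch of loading this epoch-0 checkpoint for a quick smoke test, assuming the TensorFlow weights committed in `tf_model.h5` below (the prompt and generation settings are illustrative, not from this repo):

```python
from transformers import GPT2Tokenizer, TFGPT2LMHeadModel

repo_id = "KardelRuveyda/chatbotSentences-mini"

# Pull the tokenizer and the TensorFlow checkpoint (tf_model.h5) from the Hub.
tokenizer = GPT2Tokenizer.from_pretrained(repo_id)
model = TFGPT2LMHeadModel.from_pretrained(repo_id)

# Illustrative prompt; after one epoch at ~10.3 train loss the output will
# still be near-random, so treat this purely as a smoke test.
inputs = tokenizer("Hello, how are you?", return_tensors="tf")
generated = model.generate(inputs["input_ids"], max_new_tokens=20)
print(tokenizer.decode(generated[0], skip_special_tokens=True))
```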
special_tokens_map.json CHANGED
@@ -13,6 +13,7 @@
     "rstrip": false,
     "single_word": false
   },
+  "pad_token": "<|endoftext|>",
   "unk_token": {
     "content": "<|endoftext|>",
     "lstrip": false,
tf_model.h5 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dc3aed915b00fe5267dd37a5cc8a1ab36c060fbc51d9b189f2f810ac170034f7
+oid sha256:ff3da2aa54811cb9cf2b758a40dbc6b40554bd300f3e2af4c516ad1e93ad0ab8
 size 482464848
tokenizer.json CHANGED
@@ -1,6 +1,11 @@
 {
   "version": "1.0",
-  "truncation": null,
+  "truncation": {
+    "direction": "Right",
+    "max_length": 40,
+    "strategy": "LongestFirst",
+    "stride": 0
+  },
   "padding": null,
   "added_tokens": [
     {
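
The new `truncation` block persists a hard 40-token limit: overflow is dropped from the right using the `LongestFirst` strategy, with no stride. A sketch of writing the same block with the `tokenizers` library, assuming a reasonably recent version that accepts the `direction` argument (file paths are placeholders):

```python
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")

# Mirror the committed settings: keep at most 40 tokens, drop overflow from
# the right, no overlap (stride) between truncated chunks.
tok.enable_truncation(
    max_length=40,
    stride=0,
    strategy="longest_first",
    direction="right",
)

tok.save("tokenizer.json")  # serializes the "truncation" object shown above
```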
tokenizer_config.json CHANGED
@@ -16,7 +16,7 @@
   "eos_token": "<|endoftext|>",
   "errors": "replace",
   "model_max_length": 1024,
-  "pad_token": null,
+  "pad_token": "<|endoftext|>",
   "tokenizer_class": "GPT2Tokenizer",
   "unk_token": "<|endoftext|>"
 }
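
With `pad_token` now set here as well, batched encoding with `padding=True` no longer fails for lack of a padding token. A small usage sketch against this repo (the example sentences are made up):

```python
from transformers import GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("KardelRuveyda/chatbotSentences-mini")

batch = tokenizer(
    ["hello there", "a somewhat longer example sentence"],
    padding=True,     # shorter inputs are padded with <|endoftext|>
    truncation=True,
    max_length=40,    # matches the limit persisted in tokenizer.json
    return_tensors="np",
)
print(batch["input_ids"].shape)  # (2, padded_length)
```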