Migaku committed on
Commit 485488a
1 Parent(s): adba37b

Add config.json

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+config.json filter=lfs diff=lfs merge=lfs -text
.ipynb_checkpoints/README-checkpoint.md ADDED
@@ -0,0 +1,13 @@
+---
+license: mit
+language:
+- ja
+tags:
+- PyTorch
+- Transformers
+---
+
+Japanese Stock Comment Sentiment Model
+
+This model is a sentiment analysis tool trained specifically on comments and discussions about Japanese stocks: it classifies whether a comment expresses bullish or bearish sentiment.
+For its training, a large collection of comments on individual stocks was gathered and labeled as either "bullish" or "bearish." The model can serve as a supporting tool for stock investors and market analysts in gathering information and making prompt decisions.
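The README describes a binary bullish/bearish classifier. As a minimal usage sketch (not part of this commit), the model could be loaded with the transformers text-classification pipeline; the repo id below is a hypothetical placeholder and the label names are assumed from the README:

```python
# Minimal sketch, assuming a hypothetical repo id and assumed label names.
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="Migaku/japanese-stock-comment-sentiment",  # hypothetical repo id
)

# Classify a Japanese stock comment ("I think this stock will keep rising").
result = classifier("この株はまだまだ上がると思う")
print(result)  # e.g. [{'label': 'bullish', 'score': 0.97}], label names assumed
```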
config.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c271b07279966955f9d386b440b0968ab195f55a71791df755f1489bcf89c4c6
+size 870
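Because of the .gitattributes rule added above, config.json is stored as a Git LFS pointer: a plain checkout without LFS yields this three-line pointer rather than the 870-byte JSON. A minimal sketch of fetching the resolved file with huggingface_hub, again assuming a hypothetical repo id:

```python
# Minimal sketch: download the actual config.json (resolving the LFS pointer).
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="Migaku/japanese-stock-comment-sentiment",  # hypothetical repo id
    filename="config.json",
)
print(path)  # local cache path of the downloaded 870-byte file
```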
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
tokenizer_config.json ADDED
@@ -0,0 +1,18 @@
+{
+  "cls_token": "[CLS]",
+  "do_lower_case": false,
+  "do_subword_tokenize": true,
+  "do_word_tokenize": true,
+  "mask_token": "[MASK]",
+  "mecab_kwargs": null,
+  "model_max_length": 512,
+  "name_or_path": "cl-tohoku/bert-base-japanese-whole-word-masking",
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "special_tokens_map_file": null,
+  "subword_tokenizer_type": "wordpiece",
+  "tokenizer_class": "BertJapaneseTokenizer",
+  "unk_token": "[UNK]",
+  "word_tokenizer_type": "mecab"
+}
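This tokenizer_config selects BertJapaneseTokenizer with MeCab word segmentation followed by WordPiece subword tokenization, inherited from cl-tohoku/bert-base-japanese-whole-word-masking. A minimal loading sketch (the repo id is again a hypothetical placeholder; the MeCab word tokenizer requires the fugashi and ipadic packages):

```python
# Minimal sketch: load the tokenizer described by tokenizer_config.json.
# Requires `pip install fugashi ipadic` for the MeCab word tokenizer.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "Migaku/japanese-stock-comment-sentiment"  # hypothetical repo id
)

tokens = tokenizer.tokenize("株価が上昇した")  # "the stock price rose"
print(tokens)  # MeCab word segmentation, then WordPiece subwords
```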
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:78cf65f3982a45acc196317ec75f56eded3f073acdcdc5e7255b59130b128d7a
+size 3311
vocab.txt ADDED
The diff for this file is too large to render. See raw diff