Konthee committed on
Commit
c5fb59f
1 Parent(s): 2289fe8

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +6 -6
README.md CHANGED
@@ -45,8 +45,8 @@ Texts are preprocessed with the following rules: [process_transformers](https://
45
  ```python
46
  from transformers import AutoModel,AutoProcessor
47
  from thai2transformers.preprocess import process_transformers
48
- model = AutoModel.from_pretrained("openthaigpt/CLIPTextCamembertModelWithProjection-contrastive", trust_remote_code=True)
49
- processor = AutoProcessor.from_pretrained("openthaigpt/CLIPTextCamembertModelWithProjection-contrastive", trust_remote_code=True)
50
 
51
  input_text = ["This is dog",
52
  "how are you today",
@@ -79,8 +79,8 @@ image_processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")
79
  image_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(device)
80
 
81
  # Load text model and processor.
82
- text_processor = AutoProcessor.from_pretrained("openthaigpt/CLIPTextCamembertModelWithProjection-contrastive", trust_remote_code=True)
83
- text_model = AutoModel.from_pretrained("openthaigpt/CLIPTextCamembertModelWithProjection-contrastive", trust_remote_code=True).to(device)
84
 
85
  class_labels = ['แมว','หมา', 'นก']
86
  label2id = {label: i for i, label in enumerate(class_labels)}
@@ -118,8 +118,8 @@ image_processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")
118
  image_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(device)
119
 
120
  # Load text model and processor.
121
- text_processor = AutoProcessor.from_pretrained("openthaigpt/CLIPTextCamembertModelWithProjection-contrastive", trust_remote_code=True)
122
- text_model = AutoModel.from_pretrained("openthaigpt/CLIPTextCamembertModelWithProjection-contrastive", trust_remote_code=True).to(device)
123
 
124
  text_input = ['แมวสีส้ม','หมาสีดำ', 'นกสีขาว']
125
  processed_input_text = [process_transformers(input_text_) for input_text_ in input_text ]
 
45
  ```python
46
  from transformers import AutoModel,AutoProcessor
47
  from thai2transformers.preprocess import process_transformers
48
+ model = AutoModel.from_pretrained("openthaigpt/CLIPTextCamembertModelWithProjection", trust_remote_code=True)
49
+ processor = AutoProcessor.from_pretrained("openthaigpt/CLIPTextCamembertModelWithProjection", trust_remote_code=True)
50
 
51
  input_text = ["This is dog",
52
  "how are you today",
 
79
  image_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(device)
80
 
81
  # Load text model and processor.
82
+ text_processor = AutoProcessor.from_pretrained("openthaigpt/CLIPTextCamembertModelWithProjection", trust_remote_code=True)
83
+ text_model = AutoModel.from_pretrained("openthaigpt/CLIPTextCamembertModelWithProjection", trust_remote_code=True).to(device)
84
 
85
  class_labels = ['แมว','หมา', 'นก']
86
  label2id = {label: i for i, label in enumerate(class_labels)}
 
118
  image_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(device)
119
 
120
  # Load text model and processor.
121
+ text_processor = AutoProcessor.from_pretrained("openthaigpt/CLIPTextCamembertModelWithProjection", trust_remote_code=True)
122
+ text_model = AutoModel.from_pretrained("openthaigpt/CLIPTextCamembertModelWithProjection", trust_remote_code=True).to(device)
123
 
124
  text_input = ['แมวสีส้ม','หมาสีดำ', 'นกสีขาว']
125
  processed_input_text = [process_transformers(input_text_) for input_text_ in input_text ]