alfredplpl committed
Commit 5810836 · 1 Parent(s): aff8f42

Update app.py

Files changed (1): app.py (+7 -6)
app.py CHANGED
@@ -48,14 +48,14 @@ embeddings_dict = {}
 with safe_open("unaestheticXLv31.safetensors", framework="pt") as f:
     for k in f.keys():
         embeddings_dict[k] = f.get_tensor(k)
+pipe.text_encoder.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
+pipe.text_encoder_2.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
 for i in range(len(embeddings_dict["clip_l"])):
     token = f"sksd{chr(token_num)}"
     token_num+=1
     unaestheticXLv31 += token
     pipe.tokenizer.add_tokens(token)
     token_id = pipe.tokenizer.convert_tokens_to_ids(token)
-    pipe.text_encoder.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
-    pipe.text_encoder_2.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
     pipe.text_encoder.get_input_embeddings().weight.data[token_id] = embeddings_dict["clip_l"][i]
     pipe.text_encoder_2.get_input_embeddings().weight.data[token_id] = embeddings_dict["clip_g"][i]
 
@@ -64,14 +64,14 @@ embeddings_dict = {}
 with safe_open("unaestheticXLv1.safetensors", framework="pt") as f:
     for k in f.keys():
         embeddings_dict[k] = f.get_tensor(k)
+pipe.text_encoder.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
+pipe.text_encoder_2.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
 for i in range(len(embeddings_dict["clip_l"])):
     token = f"sksd{chr(token_num)}"
     token_num+=1
     unaestheticXLv1 += token
     pipe.tokenizer.add_tokens(token)
     token_id = pipe.tokenizer.convert_tokens_to_ids(token)
-    pipe.text_encoder.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
-    pipe.text_encoder_2.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
     pipe.text_encoder.get_input_embeddings().weight.data[token_id] = embeddings_dict["clip_l"][i]
     pipe.text_encoder_2.get_input_embeddings().weight.data[token_id] = embeddings_dict["clip_g"][i]
 
@@ -80,14 +80,15 @@ embeddings_dict = {}
 with safe_open("unaestheticXLv13.safetensors", framework="pt") as f:
     for k in f.keys():
         embeddings_dict[k] = f.get_tensor(k)
+
+pipe.text_encoder.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
+pipe.text_encoder_2.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
 for i in range(len(embeddings_dict["clip_l"])):
     token = f"sksd{chr(token_num)}"
     unaestheticXLv13 += token
     token_num+=1
     pipe.tokenizer.add_tokens(token)
     token_id = pipe.tokenizer.convert_tokens_to_ids(token)
-    pipe.text_encoder.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
-    pipe.text_encoder_2.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
     pipe.text_encoder.get_input_embeddings().weight.data[token_id] = embeddings_dict["clip_l"][i]
     pipe.text_encoder_2.get_input_embeddings().weight.data[token_id] = embeddings_dict["clip_g"][i]
 
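For readers skimming the diff: the change hoists the two resize_token_embeddings calls out of the per-token loop, so each text encoder's embedding matrix is grown once per safetensors file rather than once per added token. The sketch below shows the consolidated pattern as a reusable helper. It is not the app's actual code: the helper name load_ti_embedding, its trigger_prefix and start_char parameters, and the base checkpoint in the usage example are assumptions for illustration; the file layout (a "clip_l" tensor for text_encoder, a "clip_g" tensor for text_encoder_2, one row per learned token) and the pad_to_multiple_of=128 resize come straight from the diff.

import torch
from diffusers import StableDiffusionXLPipeline
from safetensors import safe_open

def load_ti_embedding(pipe, path, trigger_prefix, start_char):
    """Hypothetical helper mirroring the pattern in the diff.

    Expects `path` to hold "clip_l" (rows for pipe.text_encoder) and
    "clip_g" (rows for pipe.text_encoder_2). Returns the concatenated
    trigger string to splice into prompts.
    """
    tensors = {}
    with safe_open(path, framework="pt") as f:
        for k in f.keys():
            tensors[k] = f.get_tensor(k)

    n = len(tensors["clip_l"])
    tokens = [f"{trigger_prefix}{chr(start_char + i)}" for i in range(n)]
    # As in the diff, only pipe.tokenizer is extended; tokenizer_2 is
    # left untouched there, so the sketch does the same.
    pipe.tokenizer.add_tokens(tokens)

    # The commit's key change: resize once per file, not once per token.
    # Unlike the diff, this resizes after add_tokens, so the new rows are
    # guaranteed to exist; the committed code resizes first and relies on
    # the pad_to_multiple_of=128 padding for headroom.
    pipe.text_encoder.resize_token_embeddings(len(pipe.tokenizer), pad_to_multiple_of=128)
    pipe.text_encoder_2.resize_token_embeddings(len(pipe.tokenizer), pad_to_multiple_of=128)

    # Copy each learned vector into both encoders' input-embedding rows.
    for i, token in enumerate(tokens):
        token_id = pipe.tokenizer.convert_tokens_to_ids(token)
        pipe.text_encoder.get_input_embeddings().weight.data[token_id] = tensors["clip_l"][i]
        pipe.text_encoder_2.get_input_embeddings().weight.data[token_id] = tensors["clip_g"][i]

    return "".join(tokens)

if __name__ == "__main__":
    pipe = StableDiffusionXLPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
    )
    # "sksd" mirrors the f"sksd{chr(token_num)}" scheme in the diff;
    # ord("a") is an arbitrary starting code point.
    unaestheticXLv31 = load_ti_embedding(
        pipe, "unaestheticXLv31.safetensors", "sksd", ord("a")
    )

Resizing after add_tokens also keeps the three per-file blocks order-independent: each call sizes both matrices to the tokenizer's current length, so earlier additions can never push a later token_id past the padded boundary.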