mav23 commited on
Commit
b804315
·
verified ·
1 Parent(s): cf62439

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ qwen2.5-3b-instruct-abliterated.Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: transformers
3
+ license: apache-2.0
4
+ license_link: https://huggingface.co/huihui-ai/Qwen2.5-3B-Instruct-abliterated/blob/main/LICENSE
5
+ language:
6
+ - en
7
+ pipeline_tag: text-generation
8
+ base_model: Qwen/Qwen2.5-3B-Instruct
9
+ tags:
10
+ - chat
11
+ - abliterated
12
+ - uncensored
13
+ ---
14
+
15
+ # huihui-ai/Qwen2.5-3B-Instruct-abliterated
16
+
17
+
18
+ This is an uncensored version of [Qwen2.5-3B-Instruct](https://huggingface.co/Qwen/Qwen2.5-3B-Instruct) created with abliteration (see [this article](https://huggingface.co/blog/mlabonne/abliteration) to learn more about it).
19
+
20
+ Special thanks to [@FailSpy](https://huggingface.co/failspy) for the original code and technique. Please follow him if you're interested in abliterated models.
21
+
22
+ ## Usage
23
+ You can use this model in your applications by loading it with Hugging Face's `transformers` library:
24
+
25
+
26
+ ```python
27
+ from transformers import AutoModelForCausalLM, AutoTokenizer
28
+
29
+ # Load the model and tokenizer
30
+ model_name = "huihui-ai/Qwen2.5-3B-Instruct-abliterated"
31
+ model = AutoModelForCausalLM.from_pretrained(
32
+ model_name,
33
+ torch_dtype="auto",
34
+ device_map="auto"
35
+ )
36
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
37
+
38
+ # Initialize conversation context
39
+ initial_messages = [
40
+ {"role": "system", "content": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant."}
41
+ ]
42
+ messages = initial_messages.copy() # Copy the initial conversation context
43
+
44
+ # Enter conversation loop
45
+ while True:
46
+ # Get user input
47
+ user_input = input("User: ").strip() # Strip leading and trailing spaces
48
+
49
+ # If the user types '/exit', end the conversation
50
+ if user_input.lower() == "/exit":
51
+ print("Exiting chat.")
52
+ break
53
+
54
+ # If the user types '/clean', reset the conversation context
55
+ if user_input.lower() == "/clean":
56
+ messages = initial_messages.copy() # Reset conversation context
57
+ print("Chat history cleared. Starting a new conversation.")
58
+ continue
59
+
60
+ # If input is empty, prompt the user and continue
61
+ if not user_input:
62
+ print("Input cannot be empty. Please enter something.")
63
+ continue
64
+
65
+ # Add user input to the conversation
66
+ messages.append({"role": "user", "content": user_input})
67
+
68
+ # Build the chat template
69
+ text = tokenizer.apply_chat_template(
70
+ messages,
71
+ tokenize=False,
72
+ add_generation_prompt=True
73
+ )
74
+
75
+ # Tokenize input and prepare it for the model
76
+ model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
77
+
78
+ # Generate a response from the model
79
+ generated_ids = model.generate(
80
+ **model_inputs,
81
+ max_new_tokens=8192
82
+ )
83
+
84
+ # Extract model output, removing special tokens
85
+ generated_ids = [
86
+ output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
87
+ ]
88
+ response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
89
+
90
+ # Add the model's response to the conversation
91
+ messages.append({"role": "assistant", "content": response})
92
+
93
+ # Print the model's response
94
+ print(f"Qwen: {response}")
95
+
96
+ ```
97
+
qwen2.5-3b-instruct-abliterated.Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b695deb95684449498a33acfc4e2b4c8761a5b7f7c0f0b6bd98fd70e56381c92
3
+ size 1822849984