# humanizer / app.py
# Author: VictorM-Coder — "Create app.py" (commit 105c7d1, verified)
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from threading import Thread
import spaces
# Model configuration
# NOTE(review): "Qwen/Qwen3-8B-Instruct" may not be a published repo id —
# Qwen3 checkpoints are released as e.g. "Qwen/Qwen3-8B"; confirm the id
# resolves on the Hub before deploying.
MODEL_ID = "Qwen/Qwen3-8B-Instruct" # Replace with specific adversarial repo if different
# Tokenizer and model are loaded once at import time (module level) so every
# request shares them; device_map="auto" lets accelerate place layers on the
# available GPU(s), and float16 halves memory versus full precision.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
MODEL_ID,
torch_dtype=torch.float16,
device_map="auto",
)
@spaces.GPU(duration=60)
def humanize_text(text, intensity):
    """Stream a rewritten, more "human-like" version of *text*.

    Runs ``model.generate`` on a background thread and yields the
    accumulated output incrementally via a ``TextIteratorStreamer`` so the
    Gradio UI updates token by token.

    Args:
        text: The input text to rewrite.
        intensity: 1-10 slider value; higher values raise the sampling
            temperature (0.75 .. 1.2), producing more varied output.

    Yields:
        The output text accumulated so far (grows with each new token).
    """
    # Guard: skip the (expensive) GPU generation entirely for empty input.
    if not text or not text.strip():
        yield ""
        return
    # Constructing the adversarial prompt
    prompt = f"Rewrite the following text to be highly human-like, varying sentence structure and avoiding common AI patterns. Maintain the original meaning perfectly.\n\nIntensity: {intensity}/10\nText: {text}\n\nHumanized Output:"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    # `inputs` is dict-like (input_ids, attention_mask), so dict(...) merges it
    # with the generation options into one kwargs mapping for model.generate.
    generation_kwargs = dict(
        inputs,
        streamer=streamer,
        max_new_tokens=1024,
        do_sample=True,
        temperature=0.7 + (intensity * 0.05),  # Higher intensity = more randomness
        top_p=0.95,
    )
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()
    output_text = ""
    try:
        for new_text in streamer:
            output_text += new_text
            yield output_text
    finally:
        # Join the worker so generation fully finishes (and any exception it
        # raised has surfaced) before the GPU allocation is released.
        thread.join()
# Gradio Interface
# Two-column layout: input text + intensity slider on the left, streamed
# result on the right. Because humanize_text is a generator, Gradio streams
# each yielded value into the output textbox as it arrives.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
gr.Markdown("# 🤖 Adversarial Humanizer (Qwen3-8B)")
gr.Markdown("Transform AI-generated text into human-like prose to bypass detection.")
with gr.Row():
with gr.Column():
input_box = gr.Textbox(label="Input Text", lines=8, placeholder="Paste AI content here...")
# 1-10 integer slider; feeds the sampling-temperature offset in humanize_text.
intensity_slider = gr.Slider(minimum=1, maximum=10, value=5, step=1, label="Humanization Intensity")
submit_btn = gr.Button("Humanize", variant="primary")
with gr.Column():
# interactive=False: display-only; the user cannot edit the streamed result.
output_box = gr.Textbox(label="Humanized Result", lines=10, interactive=False)
submit_btn.click(
fn=humanize_text,
inputs=[input_box, intensity_slider],
outputs=output_box
)
demo.launch()