| """ |
| Intelligent Ticket Auto-Routing System - Hugging Face Spaces App |
| ================================================================ |
| Converts support tickets into structured routing decisions: |
| - Multi-label tag classification |
| - Department routing (hybrid: tag-voting + semantic similarity) |
| - Priority prediction |
| - Duplicate detection via FAISS |
| """ |
|
|
| from __future__ import annotations |
|
|
| import csv |
| import os |
| import tempfile |
| import time |
| import uuid |
| from datetime import datetime |
| from pathlib import Path |
|
|
| import gradio as gr |
| import joblib |
| import numpy as np |
| from sentence_transformers import SentenceTransformer |
|
|
| from calibration_utils import ( |
| calibrate_probabilities, |
| load_temperature_scaler, |
| ) |
|
|
| from duplicate_detection_utils import CachedDuplicateDetectionEngine |
|
|
| from hybrid_routing_utils import ( |
| DEFAULT_TAG_TO_DEPARTMENT, |
| assert_valid_routing_label_policy, |
| compute_department_hybrid_scores, |
| load_routing_label_policy, |
| ) |
|
|
| from review_policy_utils import ( |
| apply_controlled_review, |
| load_review_policy, |
| ) |
|
|
| from runtime_utils import ( |
| load_model_config, |
| load_routing_config, |
| resolve_dataset_file, |
| resolve_model_dir, |
| resolve_model_reference, |
| ) |
|
|
# --- Paths and routing configuration ---------------------------------------
APP_DIR = Path(__file__).resolve().parent
MODEL_DIR = resolve_model_dir(APP_DIR)


# Routing config supplies the fallback department and priority-based
# escalation overrides; ROUTING_CONFIG_PATH is None when defaults are used.
ROUTING_CONFIG, ROUTING_CONFIG_PATH = load_routing_config(APP_DIR)
DEFAULT_DEPARTMENT = str(
    ROUTING_CONFIG.get("default_department", "Human_Review")
)
# Normalize priority keys to lowercase so later lookups are case-insensitive.
PRIORITY_ESCALATION = {
    str(priority).lower(): department
    for priority, department in (ROUTING_CONFIG.get("priority_escalation") or {}).items()
}
# Evaluation log lives in the OS temp dir (ephemeral across Space restarts).
LOG_PATH = os.path.join(tempfile.gettempdir(), "routing_evaluation_log.csv")
|
|
|
|
# --- SBERT encoders ---------------------------------------------------------
# Two encoders may be configured: one fine-tuned for routing and one for
# duplicate detection. If both references resolve to the same model name the
# single instance is shared to save memory.
print("Loading SBERT model...")
model_config = load_model_config(APP_DIR)
routing_sbert_model_name = resolve_model_reference(
    model_config.get("sbert_model", "Eklavya73/sbert_finetuned"),
    base_dir=APP_DIR,
    model_dir=MODEL_DIR,
)
duplicate_sbert_model_name = resolve_model_reference(
    model_config.get("duplicate_sbert_model", "Eklavya73/duplicate_sbert"),
    base_dir=APP_DIR,
    model_dir=MODEL_DIR,
    default="all-mpnet-base-v2",
)
routing_sbert = SentenceTransformer(routing_sbert_model_name)
# Reuse the routing encoder when both configs point at the same model.
duplicate_sbert = (
    routing_sbert
    if duplicate_sbert_model_name == routing_sbert_model_name
    else SentenceTransformer(duplicate_sbert_model_name)
)
|
|
# --- Classifier / policy artifacts ------------------------------------------
print("Loading classifiers...")
tag_model = joblib.load(MODEL_DIR / "sbert_classifier.pkl")
tag_calibrators = joblib.load(MODEL_DIR / "tag_calibrators.pkl")
temperature_scaler = load_temperature_scaler(MODEL_DIR / "tag_temperature_scaler.pkl")
review_policy = load_review_policy(MODEL_DIR / "routing_review_policy.pkl")


# The priority artifact may be a bare estimator or a {"model": ...} bundle.
priority_bundle = joblib.load(MODEL_DIR / "tuned_priority_model.pkl")
priority_model = (
    priority_bundle["model"]
    if isinstance(priority_bundle, dict) and "model" in priority_bundle
    else priority_bundle
)
priority_encoder = joblib.load(MODEL_DIR / "priority_encoder.pkl")
hf_scaler = joblib.load(MODEL_DIR / "hf_scaler.pkl")


tag_binarizer = joblib.load(MODEL_DIR / "mlb_tag_binarizer.pkl")
tag_list = list(tag_binarizer.classes_)


# Tag -> department routing policy, validated against the known tag and
# department sets so a stale policy file fails fast at startup.
dept_prototypes = joblib.load(MODEL_DIR / "department_prototypes.pkl")
routing_label_policy = load_routing_label_policy(
    MODEL_DIR / "routing_label_policy.pkl",
    fallback_tag_to_department=ROUTING_CONFIG.get(
        "departments",
        DEFAULT_TAG_TO_DEPARTMENT,
    ),
    valid_tags=tag_list,
    valid_departments=dept_prototypes.keys(),
    default_department=DEFAULT_DEPARTMENT,
)
tag_to_department = routing_label_policy["tag_to_department"]
assert_valid_routing_label_policy(
    routing_label_policy,
    valid_tags=tag_list,
    valid_departments=dept_prototypes.keys(),
)


print("Loading duplicate detection index...")
duplicate_engine = CachedDuplicateDetectionEngine(APP_DIR)


# Startup summary for the Spaces build log.
print(f"[OK] Tags: {len(tag_list)}, Departments: {len(dept_prototypes)}")
print(f"[OK] Routing label policy: {len(tag_to_department)} active mappings")
print(
    "[OK] Routing config: "
    f"{ROUTING_CONFIG_PATH if ROUTING_CONFIG_PATH is not None else 'defaults'}"
)
print(f"[OK] Default human-review department: {DEFAULT_DEPARTMENT}")
print(f"[OK] Routing SBERT model: {routing_sbert_model_name}")
print(f"[OK] Duplicate SBERT model: {duplicate_sbert_model_name}")
print(f"[OK] Duplicate index: {duplicate_engine.index_size} vectors")
print(f"[OK] Duplicate threshold: {duplicate_engine.duplicate_threshold:.4f}")
print(f"[OK] Temperature scaler: T={temperature_scaler.get('temperature', 1.0):.3f}")
print(
    "[OK] Review policy: "
    f"target={review_policy.get('target_review_fraction', 0.15):.0%}, "
    f"percentile_threshold={review_policy.get('percentile_threshold', 0.55):.3f}, "
    f"fallback_threshold={review_policy.get('fallback_threshold', 0.55):.3f}"
)
|
|
|
|
def encode_ticket_embedding(text, encoder):
    """Encode *text* with *encoder* and return the L2-normalized vector.

    An all-zero embedding is returned unchanged to avoid division by zero.
    """
    vector = np.asarray(encoder.encode(text), dtype=float).reshape(-1)
    norm = np.linalg.norm(vector)
    return vector if norm == 0.0 else vector / norm
|
|
|
|
def predict_tags(text, emb):
    """Predict calibrated tag probabilities for one ticket.

    Returns (top5_indices, top5_probs, all_calibrated_probs, raw_probs),
    with the top-5 ordered by descending calibrated probability.
    NOTE: *text* is accepted for interface parity; only *emb* is used here.
    """
    raw_probs = np.asarray(tag_model.predict_proba([emb])[0], dtype=float)
    calibrated = calibrate_probabilities(
        raw_probs,
        tag_calibrators=tag_calibrators,
        temperature_scaler=temperature_scaler,
    )
    # Descending argsort, then take the first five entries.
    top_idx = np.argsort(calibrated)[::-1][:5]
    return top_idx, calibrated[top_idx], calibrated, raw_probs
|
|
|
|
def extract_features(text):
    """Build the 6-element hand-crafted feature vector for the priority model.

    Features, in order:
      0. character count
      1. word count
      2. type/token ratio with +1 smoothing in the denominator
      3. mean word length (0 for empty text)
      4. urgency-keyword hits ("urgent", "critical", "down")
      5. negation-keyword hits ("not", "cannot", "no")

    NOTE: keyword hits use *substring* containment against the lowered full
    text (so "no" also matches inside "not"/"know"). This mirrors how the
    training-time features were computed; do not switch to word-level
    matching without retraining hf_scaler and the priority model.
    """
    words = text.split()
    lowered = text.lower()  # hoisted: previously recomputed once per keyword
    return [
        len(text),
        len(words),
        len(set(words)) / (len(words) + 1),
        np.mean([len(word) for word in words]) if words else 0,
        sum(keyword in lowered for keyword in ("urgent", "critical", "down")),
        sum(keyword in lowered for keyword in ("not", "cannot", "no")),
    ]
|
|
|
|
def predict_priority(text, emb, return_confidence=False):
    """Predict the priority label for a ticket.

    Combines the SBERT embedding with hand-crafted text features (scaled by
    hf_scaler). When return_confidence is True, also returns the model's
    probability for the predicted class (NaN if unavailable).
    """
    scaled_features = hf_scaler.transform([extract_features(text)])
    model_input = np.hstack([emb.reshape(1, -1), scaled_features])
    predicted_index = int(priority_model.predict(model_input)[0])
    label = str(priority_encoder.classes_[predicted_index])

    confidence = float("nan")
    if hasattr(priority_model, "predict_proba"):
        # Best-effort: some estimators expose predict_proba but fail at
        # call time; fall back to NaN rather than crashing the pipeline.
        try:
            probabilities = np.asarray(
                priority_model.predict_proba(model_input)[0],
                dtype=float,
            ).reshape(-1)
            if probabilities.size:
                confidence = float(probabilities[predicted_index])
        except Exception:
            confidence = float("nan")

    return (label, confidence) if return_confidence else label
|
|
|
|
# Hybrid-routing gate hyperparameters.
HYBRID_CLASSIFIER_WEIGHT = 0.7  # weight of calibrated tag-classifier votes
HYBRID_SIMILARITY_WEIGHT = 0.3  # weight of semantic prototype similarity
HYBRID_FLOOR = 0.45             # Stage 1: below this -> human review
FLAGGED_HYBRID_FLOOR = 0.30     # Stage 2 fallback: auto-route but flag for QA
MARGIN_THRESHOLD = 0.15         # top1-top2 probability gap for auto-routing
ENTROPY_THRESHOLD = 1.8         # max tag-distribution entropy for auto-routing
|
|
|
|
def compute_confidence_metrics(calibrated_probs):
    """Return (margin, entropy) confidence metrics for a probability vector.

    margin  = top-1 minus top-2 probability (top-2 taken as 0.0 for a
              single-element vector).
    entropy = Shannon entropy (natural log) of the vector after clipping to
              a 1e-12 floor and renormalizing to sum to 1.

    An empty vector yields (0.0, inf), i.e. "no confidence".
    """
    probs = np.asarray(calibrated_probs, dtype=float).reshape(-1)
    if probs.size == 0:
        return 0.0, float("inf")

    sorted_probs = np.sort(probs)[::-1]
    top1 = float(sorted_probs[0])
    top2 = float(sorted_probs[1]) if len(sorted_probs) > 1 else 0.0
    margin = top1 - top2

    # Clip to avoid log(0). After clipping every entry is >= 1e-12, so for a
    # non-empty vector the sum is strictly positive and renormalization is
    # always safe (the previous "total == 0" fallback was unreachable).
    p = np.clip(probs, 1e-12, None)
    p = p / float(p.sum())
    entropy = float(-np.sum(p * np.log(p)))
    return margin, entropy
|
|
|
|
def decide_routing_mode(hybrid_confidence, calibrated_probs):
    """Two-stage confidence gate for routing.

    Returns (mode, needs_review, margin, entropy) where mode is one of
    "AUTO_ROUTE", "AUTO_ROUTE_FLAGGED" or "HUMAN_REVIEW".
    """
    margin, entropy = compute_confidence_metrics(calibrated_probs)

    # Stage 1: hard confidence floor.
    if hybrid_confidence < HYBRID_FLOOR:
        mode, needs_review = "HUMAN_REVIEW", True
    # Stage 2: a decisive margin OR a peaked distribution clears auto-routing.
    elif margin >= MARGIN_THRESHOLD or entropy <= ENTROPY_THRESHOLD:
        mode, needs_review = "AUTO_ROUTE", False
    # Borderline confidence: route automatically but flag for QA review.
    elif hybrid_confidence >= FLAGGED_HYBRID_FLOOR:
        mode, needs_review = "AUTO_ROUTE_FLAGGED", True
    else:
        mode, needs_review = "HUMAN_REVIEW", True

    return mode, needs_review, margin, entropy
|
|
|
|
def route_ticket(emb, text):
    """Route one ticket to a department.

    Pipeline:
      1. Calibrated tag probabilities (predict_tags).
      2. Hybrid department score: weighted classifier tag-votes plus
         semantic prototype similarity (compute_department_hybrid_scores).
      3. Priority prediction (with confidence when available).
      4. Two-stage confidence gate (decide_routing_mode), priority-based
         escalation override, then the controlled review policy, which may
         force HUMAN_REVIEW regardless of the gate.

    Returns a dict with mode, final and recommended departments, priority,
    confidence metrics, review decision details, and an explanatory note.
    """
    _, _, calibrated_probs, _ = predict_tags(text, emb)
    best_dept, hybrid_confidence, department_details, top_tag_votes = (
        compute_department_hybrid_scores(
            calibrated_probs,
            emb,
            dept_prototypes,
            tag_to_department=tag_to_department,
            tag_names=tag_list,
            classifier_weight=HYBRID_CLASSIFIER_WEIGHT,
            similarity_weight=HYBRID_SIMILARITY_WEIGHT,
            top_k=5,
        )
    )
    priority, priority_confidence = predict_priority(
        text,
        emb,
        return_confidence=True,
    )
    base_mode, _, margin, entropy = decide_routing_mode(
        hybrid_confidence,
        calibrated_probs,
    )

    recommended_department = best_dept
    routed_department = recommended_department
    escalation_note = ""

    # Degenerate case: no usable tag votes or no resolvable department ->
    # always route to the human-review queue, bypassing the normal gate.
    if not top_tag_votes or best_dept is None:
        review_decision = {
            "base_mode": "HUMAN_REVIEW",
            "final_mode": "HUMAN_REVIEW",
            "forced_human_review": False,
            "percentile_threshold": float(
                review_policy.get("percentile_threshold", 0.55)
            ),
            "fallback_threshold": float(
                review_policy.get("fallback_threshold", 0.55)
            ),
            "reason": "No valid tag votes or department resolved. Requires human review.",
        }
        return {
            "mode": "HUMAN_REVIEW",
            "department": DEFAULT_DEPARTMENT,
            "recommended_department": None,
            "priority": priority,
            "priority_confidence": priority_confidence,
            "hybrid_confidence": hybrid_confidence,
            "review": True,
            "margin": margin,
            "entropy": entropy,
            "best_details": {},
            "top_tag_votes": [],
            "review_decision": review_decision,
            "note": review_decision["reason"],
        }

    # Priority escalation may override the routed department, but only when
    # the confidence gate did not already demand human review.
    escalation_department = PRIORITY_ESCALATION.get(str(priority).lower())
    if base_mode != "HUMAN_REVIEW" and escalation_department:
        routed_department = str(escalation_department)
        escalation_note = (
            f" Priority escalation override applied after gate: "
            f"{priority} -> {routed_department}."
        )

    # Controlled review policy: may upgrade the gate's decision to a forced
    # human review (e.g. to hit a target review fraction).
    mode, review, review_decision = apply_controlled_review(
        base_mode,
        hybrid_confidence,
        review_policy=review_policy,
    )

    # Resolve the final department and the human-readable explanation.
    if review_decision.get("forced_human_review", False):
        final_department = DEFAULT_DEPARTMENT
        note = (
            f"{review_decision.get('reason', '')} "
            f"Recommended department before override: {routed_department}."
            f"{escalation_note}"
        ).strip()
    elif mode == "AUTO_ROUTE":
        final_department = routed_department
        note = (
            f"Stage 2 pass: hybrid_confidence={hybrid_confidence:.4f}, "
            f"margin={margin:.4f}, entropy={entropy:.4f}."
            f"{escalation_note}"
        )
    elif mode == "AUTO_ROUTE_FLAGGED":
        final_department = routed_department
        note = (
            f"Stage 2 flagged: hybrid_confidence={hybrid_confidence:.4f}, "
            f"margin={margin:.4f}, entropy={entropy:.4f}."
            f"{escalation_note}"
        )
    elif hybrid_confidence < HYBRID_FLOOR:
        final_department = DEFAULT_DEPARTMENT
        note = (
            f"Stage 1 reject: hybrid_confidence {hybrid_confidence:.4f} "
            f"< HYBRID_FLOOR {HYBRID_FLOOR}."
        )
    else:
        final_department = DEFAULT_DEPARTMENT
        note = (
            f"Stage 2 reject: hybrid_confidence={hybrid_confidence:.4f}, "
            f"margin={margin:.4f}, entropy={entropy:.4f}."
        )

    best_details = department_details.get(recommended_department, {})
    return {
        "mode": mode,
        "department": final_department,
        "recommended_department": recommended_department,
        "priority": priority,
        "priority_confidence": priority_confidence,
        "hybrid_confidence": hybrid_confidence,
        "review": review,
        "margin": margin,
        "entropy": entropy,
        "best_details": best_details,
        "top_tag_votes": top_tag_votes,
        "review_decision": review_decision,
        "note": note.strip(),
    }
|
|
|
|
# Column order for the evaluation CSV log. _append_log fills missing keys
# with "" and _ensure_log_header rewrites the log file whenever this schema
# no longer matches the file's first row.
LOG_COLUMNS = [
    "ticket_id",
    "timestamp",
    "ticket_text",
    "duplicate_flag",
    "duplicate_score",
    "routing_mode",
    "department",
    "base_routing_mode",
    "requires_review",
    "controlled_review_applied",
    "department_confidence",
    "classifier_confidence",
    "semantic_similarity",
    "raw_semantic_similarity",
    "priority",
    "priority_confidence",
    "selected_tags",
    "routing_score",
    "hybrid_confidence",
    "margin",
    "entropy",
    "review_percentile_threshold",
    "review_fallback_threshold",
    "prediction_latency_ms",
    "explanation",
]
|
|
|
|
def _ensure_log_header():
    """Make sure the evaluation log exists and starts with LOG_COLUMNS.

    A log whose first row does not match LOG_COLUMNS is treated as stale and
    rewritten from scratch (existing rows are discarded).
    """
    if os.path.exists(LOG_PATH):
        with open(LOG_PATH, "r", newline="", encoding="utf-8") as handle:
            current_header = next(csv.reader(handle), [])
        if current_header == LOG_COLUMNS:
            return
    with open(LOG_PATH, "w", newline="", encoding="utf-8") as handle:
        csv.writer(handle).writerow(LOG_COLUMNS)
|
|
|
|
def _append_log(row_dict):
    """Append one row to the evaluation CSV in LOG_COLUMNS order.

    Keys missing from *row_dict* are written as empty strings.
    """
    _ensure_log_header()
    row = [row_dict.get(column, "") for column in LOG_COLUMNS]
    with open(LOG_PATH, "a", newline="", encoding="utf-8") as handle:
        csv.writer(handle).writerow(row)
|
|
|
|
def process_ticket(text):
    """End-to-end pipeline for one raw ticket text.

    Steps: encode (routing + duplicate embeddings) -> duplicate check against
    the FAISS index -> hybrid routing -> index the new ticket -> append an
    evaluation-log row. Returns a UI-friendly result dict.
    """
    t0 = time.time()
    ticket_id = str(uuid.uuid4())[:8]  # short id is sufficient for the demo
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    # Routing and duplicate detection may use different SBERT models, so two
    # embeddings are computed (they share one model when configs coincide).
    routing_emb = encode_ticket_embedding(text, routing_sbert)
    duplicate_emb = encode_ticket_embedding(text, duplicate_sbert)

    # Duplicate check: best match above the engine's threshold flags a dup.
    best_match = duplicate_engine.find_best_match(duplicate_emb, k=20)
    dup_score = (
        float(best_match["similarity"])
        if best_match is not None
        else 0.0
    )
    dup_text = best_match.get("matched_text") if best_match is not None else None
    is_dup = bool(
        best_match is not None
        and dup_score >= float(duplicate_engine.duplicate_threshold)
    )

    # Routing still runs for duplicates so the log row is complete.
    routing = route_ticket(routing_emb, text)
    latency_ms = round((time.time() - t0) * 1000, 2)

    mode = routing["mode"]
    dept = routing["department"]
    priority = routing["priority"]
    priority_confidence = routing["priority_confidence"]
    hybrid_confidence = routing["hybrid_confidence"]
    review = routing["review"]
    margin = routing["margin"]
    entropy = routing["entropy"]
    best_details = routing["best_details"]
    top_tag_votes = routing["top_tag_votes"]
    review_decision = routing["review_decision"]
    note = routing["note"]

    # Per-department score components for the explanation / log.
    classifier_confidence = float(best_details.get("classifier_confidence", 0.0))
    semantic_similarity = float(best_details.get("semantic_similarity", 0.0))
    raw_semantic_similarity = float(best_details.get("raw_semantic_similarity", 0.0))
    base_mode = str(review_decision.get("base_mode", mode))
    review_reason = str(review_decision.get("reason", note))
    percentile_threshold = float(
        review_decision.get(
            "percentile_threshold",
            review_policy.get("percentile_threshold", 0.55),
        )
    )
    fallback_threshold = float(
        review_decision.get(
            "fallback_threshold",
            review_policy.get("fallback_threshold", 0.55),
        )
    )
    controlled_review_applied = bool(
        review_decision.get("forced_human_review", False)
    )
    recommended_department = routing.get("recommended_department")
    # Top-3 tags rendered as "tag (score)" for the UI and log.
    tag_summary = ", ".join(
        f"{vote['tag']} ({vote['score']:.2f})"
        for vote in top_tag_votes[:3]
    )

    recommended_text = (
        f" Recommended department before final policy: {recommended_department}."
        if recommended_department and recommended_department != dept
        else ""
    )

    # Build the explanation string and the UI result dict; duplicates get a
    # distinct status and a message pointing at the matched original ticket.
    if is_dup:
        explanation = (
            f"Duplicate detected (score={dup_score:.4f}). "
            f"Original: {str(dup_text)[:100]}. "
            f"Routing mode: {mode} (base_mode={base_mode}), "
            f"final_department={dept}, hybrid_confidence={hybrid_confidence:.3f}, "
            f"classifier_confidence={classifier_confidence:.3f}, "
            f"semantic_similarity={semantic_similarity:.3f} "
            f"(raw={raw_semantic_similarity:.3f}), margin={margin:.3f}, "
            f"entropy={entropy:.3f}, controlled_review_applied={controlled_review_applied}, "
            f"review_thresholds=(percentile={percentile_threshold:.3f}, "
            f"fallback={fallback_threshold:.3f}).{recommended_text} {note}"
        )
        result = {
            "ticket_id": ticket_id,
            "status": "DUPLICATE",
            "route": mode,
            "department": dept,
            "priority": priority,
            "confidence": round(float(hybrid_confidence), 3),
            "review": review,
            "tags": tag_summary,
            "message": (
                f"Duplicate of: {str(dup_text)[:200]} (similarity={dup_score:.3f}). "
                f"{note}"
            ).strip(),
            "latency": latency_ms,
        }
    else:
        explanation = (
            f"Ticket processed with final department {dept}. "
            f"Predicted tags [{tag_summary}] produced routing mode {mode} "
            f"(base_mode={base_mode}), hybrid_confidence={hybrid_confidence:.3f}, "
            f"classifier_confidence={classifier_confidence:.3f}, "
            f"semantic_similarity={semantic_similarity:.3f} "
            f"(raw={raw_semantic_similarity:.3f}), margin={margin:.3f}, "
            f"entropy={entropy:.3f}, controlled_review_applied={controlled_review_applied}, "
            f"review_thresholds=(percentile={percentile_threshold:.3f}, "
            f"fallback={fallback_threshold:.3f}).{recommended_text} {review_reason}"
        )
        result = {
            "ticket_id": ticket_id,
            "status": "NOT DUPLICATE",
            "route": mode,
            "department": dept,
            "priority": priority,
            "confidence": round(float(hybrid_confidence), 3),
            "review": review,
            "tags": tag_summary,
            "message": note if note else "Ticket processed successfully",
            "latency": latency_ms,
        }

    # Index the new ticket (even if it was a duplicate) and persist the row.
    duplicate_engine.add_ticket(ticket_id, text, embedding=duplicate_emb)
    _append_log(
        {
            "ticket_id": ticket_id,
            "timestamp": timestamp,
            "ticket_text": text,
            "duplicate_flag": is_dup,
            "duplicate_score": round(float(dup_score), 4),
            "routing_mode": mode,
            "department": dept,
            "department_confidence": round(float(hybrid_confidence), 4),
            "base_routing_mode": base_mode,
            "requires_review": bool(review),
            "controlled_review_applied": controlled_review_applied,
            "classifier_confidence": round(float(classifier_confidence), 4),
            "semantic_similarity": round(float(semantic_similarity), 4),
            "raw_semantic_similarity": round(float(raw_semantic_similarity), 4),
            "priority": priority,
            # NaN confidence (no predict_proba) is logged as an empty cell.
            "priority_confidence": (
                round(float(priority_confidence), 4)
                if np.isfinite(priority_confidence)
                else ""
            ),
            "selected_tags": tag_summary,
            "routing_score": round(float(hybrid_confidence), 4),
            "hybrid_confidence": round(float(hybrid_confidence), 4),
            "margin": round(float(margin), 4),
            "entropy": round(float(entropy), 4),
            "review_percentile_threshold": round(float(percentile_threshold), 4),
            "review_fallback_threshold": round(float(fallback_threshold), 4),
            "prediction_latency_ms": latency_ms,
            "explanation": explanation,
        }
    )

    return result
|
|
|
|
def ui_process(text):
    """Gradio callback: run the pipeline and format a 9-tuple for the UI."""
    if not text or not text.strip():
        return ("Please enter ticket text", "", "", "", "", "", "", "", "")

    result = process_ticket(text.strip())

    # Review badge depends only on the routing mode.
    badge_by_route = {
        "HUMAN_REVIEW": "Human review required",
        "AUTO_ROUTE_FLAGGED": "QA review required",
    }
    review_badge = badge_by_route.get(result["route"], "No")

    # Title-case known priorities; pass unknown labels through unchanged.
    priority_display = {
        "critical": "Critical",
        "high": "High",
        "medium": "Medium",
        "low": "Low",
    }.get(result["priority"].lower(), result["priority"])

    route_display = {
        "AUTO_ROUTE": "Auto-Routed",
        "AUTO_ROUTE_FLAGGED": "Auto-Routed + Flagged",
        "HUMAN_REVIEW": "Human Review Required",
    }.get(result["route"], result["route"])

    return (
        result["status"],
        result["ticket_id"],
        route_display,
        result["department"].replace("_", " "),
        priority_display,
        f"{int(result['confidence'] * 100)}%",
        result["tags"],
        review_badge,
        result["message"],
    )
|
|
|
|
# Custom CSS injected into the Gradio app: Inter font, gradient header,
# result-card and button styling. The final rule hides Gradio's footer.
CSS = """
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&display=swap');

* { font-family: 'Inter', sans-serif !important; }

.gradio-container {
    max-width: 960px !important;
    margin: 0 auto !important;
}

.app-header {
    text-align: center;
    padding: 1.5rem 1rem;
    background: linear-gradient(135deg, #4f46e5 0%, #7c3aed 50%, #a855f7 100%);
    border-radius: 16px;
    margin-bottom: 1.5rem;
    box-shadow: 0 8px 32px rgba(79, 70, 229, 0.3);
}
.app-header h1 {
    color: white !important;
    font-size: 1.75rem !important;
    font-weight: 700 !important;
    margin: 0 !important;
    letter-spacing: -0.02em;
}
.app-header p {
    color: rgba(255,255,255,0.85) !important;
    font-size: 0.95rem !important;
    margin: 0.4rem 0 0 0 !important;
}

.result-card {
    background: linear-gradient(145deg, rgba(255,255,255,0.05), rgba(255,255,255,0.02));
    border: 1px solid rgba(255,255,255,0.1);
    border-radius: 12px;
    padding: 0.25rem;
}

.status-box textarea, .status-box input {
    font-weight: 600 !important;
    font-size: 1rem !important;
}

.submit-btn {
    background: linear-gradient(135deg, #4f46e5, #7c3aed) !important;
    border: none !important;
    color: white !important;
    font-weight: 600 !important;
    font-size: 1rem !important;
    padding: 0.75rem 2rem !important;
    border-radius: 10px !important;
    box-shadow: 0 4px 16px rgba(79, 70, 229, 0.4) !important;
    transition: all 0.3s ease !important;
}
.submit-btn:hover {
    transform: translateY(-2px) !important;
    box-shadow: 0 6px 24px rgba(79, 70, 229, 0.5) !important;
}

.clear-btn {
    border: 1px solid rgba(255,255,255,0.2) !important;
    border-radius: 10px !important;
    font-weight: 500 !important;
}

.stats-row {
    text-align: center;
    padding: 0.75rem;
    background: rgba(79, 70, 229, 0.08);
    border-radius: 10px;
    margin-top: 0.5rem;
    font-size: 0.85rem;
    color: #a5b4fc;
}

footer { display: none !important; }
"""
|
|
|
|
# Sample tickets for the Gradio Examples widget: hardware, VPN/access,
# infrastructure, billing, outage, and training requests.
EXAMPLES = [
    [
        "My laptop screen is flickering and sometimes goes completely black. "
        "I've tried restarting but the issue persists after login."
    ],
    [
        "I cannot access the company VPN from my home network. It keeps showing "
        "authentication failed error even though my password is correct."
    ],
    [
        "We need to upgrade our database server as the current one is running out "
        "of storage space and response times have increased significantly."
    ],
    [
        "I was charged twice for my last month's subscription. Please process a "
        "refund for the duplicate charge."
    ],
    [
        "The email server has been down since this morning. No one in the office "
        "can send or receive emails. This is critical!"
    ],
    [
        "Can you provide training materials for the new CRM software that was "
        "deployed last week?"
    ],
]
|
|
|
|
# --- Gradio UI ---------------------------------------------------------------
with gr.Blocks(
    css=CSS,
    theme=gr.themes.Soft(primary_hue="indigo", neutral_hue="slate"),
    title="Ticket Auto-Routing System",
) as app:
    # Header banner.
    gr.HTML(
        """
        <div class="app-header">
            <h1>Intelligent Ticket Auto-Routing System</h1>
            <p>AI-powered ticket classification, routing, priority prediction and duplicate detection</p>
        </div>
        """
    )

    with gr.Row():
        # Left column: ticket input, action buttons, and example tickets.
        with gr.Column(scale=1):
            ticket_input = gr.Textbox(
                label="Ticket Description",
                placeholder="Describe the support issue in detail...",
                lines=6,
                max_lines=12,
            )
            with gr.Row():
                submit_btn = gr.Button(
                    "Process Ticket",
                    variant="primary",
                    elem_classes=["submit-btn"],
                )
                clear_btn = gr.ClearButton(
                    value="Clear",
                    elem_classes=["clear-btn"],
                )

            gr.Examples(
                examples=EXAMPLES,
                inputs=ticket_input,
                label="Try these examples",
            )

        # Right column: read-only result cards.
        with gr.Column(scale=1):
            with gr.Group(elem_classes=["result-card"]):
                dup_status = gr.Textbox(
                    label="Duplicate Status",
                    interactive=False,
                    elem_classes=["status-box"],
                )
                ticket_id = gr.Textbox(label="Ticket ID", interactive=False)

            with gr.Group(elem_classes=["result-card"]):
                with gr.Row():
                    route_mode = gr.Textbox(
                        label="Routing Mode",
                        interactive=False,
                    )
                    department = gr.Textbox(
                        label="Department",
                        interactive=False,
                    )
                with gr.Row():
                    priority = gr.Textbox(label="Priority", interactive=False)
                    confidence = gr.Textbox(
                        label="Hybrid Confidence",
                        interactive=False,
                    )

            with gr.Group(elem_classes=["result-card"]):
                tags = gr.Textbox(label="Predicted Tags", interactive=False)
                needs_review = gr.Textbox(label="Needs Review", interactive=False)
                message = gr.Textbox(
                    label="Details",
                    interactive=False,
                    lines=2,
                )

    # Footer stats (values are captured once at app build time).
    gr.HTML(
        f"""
        <div class="stats-row">
            Database: <strong>{duplicate_engine.index_size:,}</strong> tickets indexed
            |
            <strong>{len(tag_list)}</strong> tag categories
            |
            <strong>{len(dept_prototypes)}</strong> departments
        </div>
        """
    )

    # Output components in the exact order ui_process returns its 9-tuple.
    outputs = [
        dup_status,
        ticket_id,
        route_mode,
        department,
        priority,
        confidence,
        tags,
        needs_review,
        message,
    ]

    # Button click and textbox Enter both run the pipeline; the Clear button
    # resets the input together with every output field.
    submit_btn.click(fn=ui_process, inputs=ticket_input, outputs=outputs)
    ticket_input.submit(fn=ui_process, inputs=ticket_input, outputs=outputs)
    clear_btn.add([ticket_input] + outputs)
|
|
|
|
if __name__ == "__main__":
    # Launch the Gradio server when the script is run directly.
    app.launch()
|
|