fixed: fixed the response
Browse files
features/Model/English_model/feature_names.json
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[
|
| 2 |
+
"perplexity",
|
| 3 |
+
"burst_mean",
|
| 4 |
+
"burst_std",
|
| 5 |
+
"burst_max",
|
| 6 |
+
"burst_min",
|
| 7 |
+
"burst_range",
|
| 8 |
+
"num_words",
|
| 9 |
+
"num_chars",
|
| 10 |
+
"num_sentences",
|
| 11 |
+
"avg_word_len",
|
| 12 |
+
"avg_sent_len",
|
| 13 |
+
"lexical_diversity",
|
| 14 |
+
"punct_ratio",
|
| 15 |
+
"caps_ratio",
|
| 16 |
+
"flesch_reading",
|
| 17 |
+
"flesch_grade"
|
| 18 |
+
]
|
features/Model/English_model/metadata.json
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"selected_model": "hybrid_tfidf_logistic",
|
| 3 |
+
"cv_best_f1": 0.8593569681592504,
|
| 4 |
+
"num_engineered_features": 16,
|
| 5 |
+
"num_word_tfidf_features": 86956,
|
| 6 |
+
"num_char_tfidf_features": 80000,
|
| 7 |
+
"train_samples": 15952,
|
| 8 |
+
"test_samples": 3988,
|
| 9 |
+
"train_accuracy": 0.980253259779338,
|
| 10 |
+
"train_f1": 0.980182447310475,
|
| 11 |
+
"test_accuracy": 0.8713640922768305,
|
| 12 |
+
"test_f1": 0.8707482993197279
|
| 13 |
+
}
|
features/nepali_text_classifier/inferencer.py
CHANGED
|
@@ -81,9 +81,9 @@ def classify_text(text: str, model_names=None, top_k: int = 2):
|
|
| 81 |
return {
|
| 82 |
"label": final_label,
|
| 83 |
"confidence": round(avg_conf * 100, 2),
|
| 84 |
-
"selected_models": selected_names,
|
| 85 |
-
"model_predictions": per_model,
|
| 86 |
-
"votes": {"AI": ai_votes, "Human": human_votes},
|
| 87 |
-
"available_models": list(models.keys()),
|
| 88 |
-
"unavailable_models": artifacts["unavailable_models"],
|
| 89 |
}
|
|
|
|
| 81 |
return {
|
| 82 |
"label": final_label,
|
| 83 |
"confidence": round(avg_conf * 100, 2),
|
| 84 |
+
# "selected_models": selected_names,
|
| 85 |
+
# "model_predictions": per_model,
|
| 86 |
+
# "votes": {"AI": ai_votes, "Human": human_votes},
|
| 87 |
+
# "available_models": list(models.keys()),
|
| 88 |
+
# "unavailable_models": artifacts["unavailable_models"],
|
| 89 |
}
|
features/text_classifier/controller.py
CHANGED
|
@@ -12,22 +12,22 @@ from .preprocess import parse_docx, parse_pdf, parse_txt
|
|
| 12 |
security = HTTPBearer()
|
| 13 |
|
| 14 |
|
| 15 |
-
def build_bias_summary(ai_likelihood: float) -> dict[str, object]:
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
|
| 32 |
|
| 33 |
# Verify Bearer token from Authorization header
|
|
@@ -54,12 +54,11 @@ async def handle_text_analysis(text: str):
|
|
| 54 |
)
|
| 55 |
|
| 56 |
label, perplexity, ai_likelihood = await asyncio.to_thread(classify_text, text)
|
| 57 |
-
bias_summary = build_bias_summary(ai_likelihood)
|
| 58 |
return {
|
| 59 |
"result": label,
|
| 60 |
"perplexity": round(perplexity, 2),
|
| 61 |
"ai_likelihood": ai_likelihood,
|
| 62 |
-
**bias_summary,
|
| 63 |
}
|
| 64 |
|
| 65 |
|
|
|
|
| 12 |
security = HTTPBearer()
|
| 13 |
|
| 14 |
|
| 15 |
+
# def build_bias_summary(ai_likelihood: float) -> dict[str, object]:
|
| 16 |
+
# """Convert an AI likelihood score into a human-readable bias summary."""
|
| 17 |
+
# if ai_likelihood > 50:
|
| 18 |
+
# overall_bias = "AI"
|
| 19 |
+
# bias_statement = f"The text is biased toward AI-generated writing ({ai_likelihood}% AI likelihood)."
|
| 20 |
+
# elif ai_likelihood < 50:
|
| 21 |
+
# overall_bias = "Human"
|
| 22 |
+
# bias_statement = f"The text is biased toward human writing ({100 - ai_likelihood}% human likelihood)."
|
| 23 |
+
# else:
|
| 24 |
+
# overall_bias = "Balanced"
|
| 25 |
+
# bias_statement = "The text is balanced between AI and human writing."
|
| 26 |
+
|
| 27 |
+
# return {
|
| 28 |
+
# "overall_bias": overall_bias,
|
| 29 |
+
# "bias_statement": bias_statement,
|
| 30 |
+
# }
|
| 31 |
|
| 32 |
|
| 33 |
# Verify Bearer token from Authorization header
|
|
|
|
| 54 |
)
|
| 55 |
|
| 56 |
label, perplexity, ai_likelihood = await asyncio.to_thread(classify_text, text)
|
| 57 |
+
# bias_summary = build_bias_summary(ai_likelihood)
|
| 58 |
return {
|
| 59 |
"result": label,
|
| 60 |
"perplexity": round(perplexity, 2),
|
| 61 |
"ai_likelihood": ai_likelihood,
|
|
|
|
| 62 |
}
|
| 63 |
|
| 64 |
|