Datasets:
Tasks:
Text Classification
Modalities:
Text
Formats:
json
Languages:
Vietnamese
Size:
1K - 10K
DOI:
License:
#!/usr/bin/env python3
"""Process raw banking data into three task-specific subsets."""
import json
import re
from pathlib import Path
def extract_labels_and_text(line):
    """Split a labeled line into aspect labels, sentiments, and clean text.

    Labels look like ``__label__ASPECT#sentiment`` (sentiment is one of
    positive/negative/neutral); whatever remains after stripping them is the
    review text. Returns ``(aspects, sentiments, text)`` as two lists and a
    string, or ``None`` when either the labels or the text are missing.
    """
    label_re = r"__label__([A-Z_]+)#(positive|negative|neutral)"
    found = re.findall(label_re, line)
    remainder = re.sub(r"__label__[A-Z_]+#(positive|negative|neutral)\s*", "", line).strip()
    if found and remainder:
        # re.findall yields (aspect, sentiment) pairs; split them into
        # two parallel lists for the callers.
        aspects, sentiments = zip(*found)
        return list(aspects), list(sentiments), remainder
    return None
def get_overall_sentiment(sentiments):
    """Collapse several per-aspect sentiments into one overall label.

    A unanimous list returns its shared label; otherwise the most frequent
    label wins, with ties resolved in favor of the earliest-seen label.
    """
    if len(set(sentiments)) == 1:
        return sentiments[0]
    # dict.fromkeys keeps first-occurrence order, so max() breaks ties the
    # same way an insertion-ordered tally would (first-seen label wins).
    tally = {label: sentiments.count(label) for label in dict.fromkeys(sentiments)}
    return max(tally, key=tally.get)
def save_subset(data, output_path):
    """Write *data* to *output_path* as one JSON object per line (JSONL).

    Parent directories are created on demand; non-ASCII text (Vietnamese)
    is written verbatim rather than escaped.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    payload = "".join(json.dumps(item, ensure_ascii=False) + "\n" for item in data)
    output_path.write_text(payload, encoding="utf-8")
def process_file(input_file, output_dir):
    """Process a single input file into three subsets.

    Reads labeled lines from *input_file* and writes one JSONL file per task
    (classification, sentiment, aspect_sentiment) under *output_dir*, named
    after the split ("train" if the input path mentions it, else "test").
    """
    subsets = {"classification": [], "sentiment": [], "aspect_sentiment": []}
    split_name = "train" if "train" in str(input_file) else "test"
    with open(input_file, encoding="utf-8") as handle:
        for raw_line in handle:
            parsed = extract_labels_and_text(raw_line.strip())
            if parsed is None:
                continue  # skip lines with no labels or no text
            aspects, sentiments, text = parsed
            # Classification keeps only the first aspect as the label.
            subsets["classification"].append({"text": text, "label": aspects[0]})
            # Sentiment collapses per-aspect sentiments into one label.
            subsets["sentiment"].append(
                {"text": text, "sentiment": get_overall_sentiment(sentiments)}
            )
            # Aspect-sentiment keeps every (aspect, sentiment) pair.
            pairs = [
                {"aspect": a, "sentiment": s}
                for a, s in zip(aspects, sentiments, strict=False)
            ]
            subsets["aspect_sentiment"].append({"text": text, "aspects": pairs})
    out_root = Path(output_dir)
    for subset_name, subset_data in subsets.items():
        save_subset(subset_data, out_root / subset_name / f"{split_name}.jsonl")
        print(f"✅ {subset_name}/{split_name}.jsonl: {len(subset_data)} examples")
def main():
    """Process raw data into task-specific subsets."""
    print("🔄 Processing banking data...")
    # Both raw splits land under the same output root, one file per task.
    for raw_path in ("raw_data/train.txt", "raw_data/test.txt"):
        process_file(raw_path, "data")
    print("\n🎉 Processing complete!")
    print("💡 Run 'python validate.py' to test the dataset")


if __name__ == "__main__":
    main()