"""
Lab 4: Data Cleaning & Preprocessing
=====================================
BIM5021 - Data Warehousing and Integration
Chapter 4: Data Preprocessing
Objectives:
- Data quality assessment
- Handling missing values (multiple methods)
- Detecting and handling outliers (IQR, Z-score)
- Normalization (Min-Max, Z-score, Robust)
- PCA (Principal Component Analysis)
- Feature engineering for the Olist dataset
Requirements: pip install pandas numpy scikit-learn matplotlib
"""
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler
from sklearn.decomposition import PCA
from sklearn.impute import KNNImputer
import warnings
warnings.filterwarnings('ignore')
# ==============================================================================
# PART 1: Create a Sample Dataset (simulating Olist)
# ==============================================================================
def create_sample_olist():
"""Tạo sample dataset mô phỏng Olist với các vấn đề data quality."""
np.random.seed(42)
n = 1000
    # Inject deliberate data quality problems
df = pd.DataFrame({
'order_id': [f'ORD_{i:05d}' for i in range(n)],
'price': np.concatenate([
np.random.lognormal(4, 1, n-10), # Normal prices
np.array([-50, -10, 0, 0, 0, # Negative/zero (errors)
5000, 8000, 12000, 15000, 20000]) # Outliers
]),
'freight_value': np.concatenate([
np.random.exponential(30, n-5),
np.array([np.nan, np.nan, np.nan, -5, 500]) # Missing + errors
]),
'review_score': np.random.choice([1, 2, 3, 4, 5, np.nan], n,
p=[0.05, 0.05, 0.1, 0.3, 0.4, 0.1]),
'product_weight_g': np.concatenate([
np.random.lognormal(6, 1.2, n-3),
np.array([np.nan, np.nan, 0]) # Missing + zero
]),
'product_length_cm': np.random.uniform(5, 100, n),
'product_height_cm': np.random.uniform(2, 80, n),
'product_width_cm': np.random.uniform(5, 60, n),
'customer_state': np.random.choice(
['SP', 'RJ', 'MG', 'RS', 'PR', 'BA', 'SC', None],
n, p=[0.3, 0.15, 0.1, 0.1, 0.08, 0.07, 0.05, 0.15]
),
'delivery_days': np.concatenate([
np.random.exponential(10, n-5),
np.array([np.nan, np.nan, -2, 0, 120]) # Missing + errors + outlier
]),
'order_status': np.random.choice(
['delivered', 'shipped', 'canceled', 'processing', 'DELIVERED', 'Delivered'],
n, p=[0.6, 0.1, 0.05, 0.05, 0.1, 0.1]
),
})
# Shuffle
df = df.sample(frac=1, random_state=42).reset_index(drop=True)
print("=" * 70)
print(" SAMPLE DATASET CREATED")
print("=" * 70)
print(f" Shape: {df.shape}")
print(f" Columns: {list(df.columns)}")
print(f"\n First 5 rows:")
print(df.head().to_string())
return df
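# This lab uses synthetic data so it runs standalone. If you have the real Olist
# dataset (Kaggle: "Brazilian E-Commerce Public Dataset by Olist"), a minimal
# loading sketch -- the filename below is an assumption, adjust to your download:
def load_real_olist(path='olist_order_items_dataset.csv') -> pd.DataFrame:
    """Sketch: load a real Olist CSV instead of the synthetic sample."""
    return pd.read_csv(path)  # hypothetical path; not called anywhere in this lab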
# ==============================================================================
# PART 2: Data Quality Assessment
# ==============================================================================
def assess_data_quality(df: pd.DataFrame):
"""Đánh giá chất lượng dữ liệu toàn diện."""
print("\n" + "=" * 70)
print(" DATA QUALITY ASSESSMENT")
print("=" * 70)
# 2.1 Missing Values
print("\n--- 1. Missing Values ---")
missing = df.isnull().sum()
missing_pct = (missing / len(df) * 100).round(2)
missing_report = pd.DataFrame({
'Missing Count': missing,
'Missing %': missing_pct,
'Data Type': df.dtypes
})
missing_report = missing_report[missing_report['Missing Count'] > 0].sort_values(
'Missing %', ascending=False
)
print(missing_report.to_string())
# 2.2 Duplicates
print("\n--- 2. Duplicates ---")
dup_count = df.duplicated().sum()
dup_id = df['order_id'].duplicated().sum()
print(f" Full row duplicates: {dup_count}")
print(f" Duplicate order_ids: {dup_id}")
# 2.3 Negative/Invalid Values
print("\n--- 3. Invalid Values ---")
numeric_cols = df.select_dtypes(include=[np.number]).columns
for col in numeric_cols:
neg_count = (df[col] < 0).sum()
zero_count = (df[col] == 0).sum()
if neg_count > 0 or (zero_count > 0 and col != 'review_score'):
print(f" {col}: {neg_count} negative, {zero_count} zeros")
# 2.4 Consistency check
print("\n--- 4. Consistency ---")
if 'order_status' in df.columns:
print(f" order_status unique values: {df['order_status'].unique()}")
print(f" → Inconsistency: mixed case (delivered, DELIVERED, Delivered)")
# 2.5 Outliers (IQR method)
print("\n--- 5. Outliers (IQR Method) ---")
for col in ['price', 'freight_value', 'delivery_days', 'product_weight_g']:
if col in df.columns:
data = df[col].dropna()
Q1 = data.quantile(0.25)
Q3 = data.quantile(0.75)
IQR = Q3 - Q1
lower = Q1 - 1.5 * IQR
upper = Q3 + 1.5 * IQR
outliers = ((data < lower) | (data > upper)).sum()
print(f" {col}: Q1={Q1:.1f}, Q3={Q3:.1f}, IQR={IQR:.1f}, "
f"bounds=[{lower:.1f}, {upper:.1f}], outliers={outliers} ({outliers/len(data)*100:.1f}%)")
# 2.6 Summary Score
print("\n--- 6. Quality Score ---")
total_cells = df.shape[0] * df.shape[1]
missing_cells = df.isnull().sum().sum()
completeness = (1 - missing_cells / total_cells) * 100
print(f" Completeness: {completeness:.1f}%")
print(f" Total issues found: missing={missing_cells}, duplicates={dup_count}")
# ==============================================================================
# PART 3: Data Cleaning
# ==============================================================================
def clean_data(df: pd.DataFrame) -> pd.DataFrame:
"""Làm sạch dữ liệu."""
print("\n" + "=" * 70)
print(" DATA CLEANING")
print("=" * 70)
df_clean = df.copy()
original_shape = df_clean.shape
# 3.1 Standardize categorical values
print("\n--- Step 1: Standardize Categories ---")
if 'order_status' in df_clean.columns:
before = df_clean['order_status'].nunique()
df_clean['order_status'] = df_clean['order_status'].str.lower().str.strip()
after = df_clean['order_status'].nunique()
print(f" order_status: {before}{after} unique values")
if 'customer_state' in df_clean.columns:
df_clean['customer_state'] = df_clean['customer_state'].str.upper().str.strip()
# 3.2 Handle invalid values
print("\n--- Step 2: Fix Invalid Values ---")
# Remove negative prices
neg_price = (df_clean['price'] < 0).sum()
df_clean.loc[df_clean['price'] < 0, 'price'] = np.nan
print(f" price: {neg_price} negative values → set to NaN")
# Remove negative freight
neg_freight = (df_clean['freight_value'] < 0).sum()
df_clean.loc[df_clean['freight_value'] < 0, 'freight_value'] = np.nan
print(f" freight_value: {neg_freight} negative values → set to NaN")
# Remove negative delivery_days
neg_delivery = (df_clean['delivery_days'] < 0).sum()
df_clean.loc[df_clean['delivery_days'] < 0, 'delivery_days'] = np.nan
print(f" delivery_days: {neg_delivery} negative values → set to NaN")
# 3.3 Handle missing values
print("\n--- Step 3: Impute Missing Values ---")
# Numerical: median imputation (robust to outliers)
for col in ['price', 'freight_value', 'product_weight_g', 'delivery_days']:
if col in df_clean.columns:
n_missing = df_clean[col].isna().sum()
median_val = df_clean[col].median()
            df_clean[col] = df_clean[col].fillna(median_val)  # avoid inplace fillna on a column (chained assignment)
print(f" {col}: {n_missing} missing → filled with median ({median_val:.2f})")
# Categorical: mode imputation
for col in ['customer_state']:
if col in df_clean.columns:
n_missing = df_clean[col].isna().sum()
mode_val = df_clean[col].mode()[0]
            df_clean[col] = df_clean[col].fillna(mode_val)
print(f" {col}: {n_missing} missing → filled with mode ({mode_val})")
    # review_score: mode imputation (discrete 1-5 scale)
if 'review_score' in df_clean.columns:
n_missing = df_clean['review_score'].isna().sum()
        df_clean['review_score'] = df_clean['review_score'].fillna(df_clean['review_score'].mode()[0])
print(f" review_score: {n_missing} missing → filled with mode")
# 3.4 Handle outliers (capping/winsorizing)
print("\n--- Step 4: Handle Outliers (Capping) ---")
for col in ['price', 'freight_value', 'delivery_days']:
if col in df_clean.columns:
            p01 = df_clean[col].quantile(0.01)  # 1st/99th percentiles, not quartiles
            p99 = df_clean[col].quantile(0.99)
            before_outliers = ((df_clean[col] < p01) | (df_clean[col] > p99)).sum()
            df_clean[col] = df_clean[col].clip(lower=p01, upper=p99)
            print(f" {col}: Capped to [{p01:.2f}, {p99:.2f}], {before_outliers} values adjusted")
# 3.5 Remove duplicates
print("\n--- Step 5: Remove Duplicates ---")
before = len(df_clean)
df_clean = df_clean.drop_duplicates(subset=['order_id'])
after = len(df_clean)
print(f" Rows: {before}{after} (removed {before - after} duplicates)")
print(f"\n Final shape: {original_shape}{df_clean.shape}")
return df_clean
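# KNNImputer is imported above but clean_data() opts for simple median/mode
# fills. A sketch of the KNN alternative, which fills each missing value from
# the k most similar rows; k=5 is an arbitrary choice for illustration:
def impute_knn(df: pd.DataFrame, cols: list, k: int = 5) -> pd.DataFrame:
    """Sketch: KNN-based imputation for a set of numeric columns."""
    out = df.copy()
    imputer = KNNImputer(n_neighbors=k)
    out[cols] = imputer.fit_transform(out[cols])  # neighbors measured on `cols` only
    return out
# Usage (not called in main): impute_knn(df, ['price', 'freight_value', 'delivery_days'])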
# ==============================================================================
# PART 4: Normalization Comparison
# ==============================================================================
def normalize_comparison(df: pd.DataFrame):
"""So sánh các phương pháp normalization."""
print("\n" + "=" * 70)
print(" NORMALIZATION COMPARISON")
print("=" * 70)
col = 'price'
data = df[[col]].dropna().copy()
# Apply different scalers
scalers = {
'Original': data[col].values,
'Min-Max [0,1]': MinMaxScaler().fit_transform(data).flatten(),
'Z-Score (Standard)': StandardScaler().fit_transform(data).flatten(),
'Robust (IQR)': RobustScaler().fit_transform(data).flatten(),
}
fig, axes = plt.subplots(2, 2, figsize=(12, 10))
for ax, (name, values) in zip(axes.flat, scalers.items()):
ax.hist(values, bins=50, alpha=0.7, color='#3498db', edgecolor='white')
ax.set_title(f'{name}\nmean={np.mean(values):.2f}, std={np.std(values):.2f}')
ax.set_ylabel('Frequency')
ax.axvline(np.mean(values), color='red', linestyle='--', label=f'Mean={np.mean(values):.2f}')
ax.axvline(np.median(values), color='green', linestyle='--', label=f'Median={np.median(values):.2f}')
ax.legend(fontsize=8)
plt.suptitle('Normalization Methods Comparison (price column)', fontsize=14, fontweight='bold')
plt.tight_layout()
plt.savefig('normalization_comparison.png', dpi=150, bbox_inches='tight')
print(f"\n [OK] Saved: normalization_comparison.png")
plt.close()
# Print statistics
print(f"\n Statistics comparison for '{col}':")
print(f" {'Method':<25} {'Mean':>10} {'Std':>10} {'Min':>10} {'Max':>10}")
print(f" {'-'*65}")
for name, values in scalers.items():
print(f" {name:<25} {np.mean(values):>10.3f} {np.std(values):>10.3f} "
f"{np.min(values):>10.3f} {np.max(values):>10.3f}")
# ==============================================================================
# PART 5: PCA (Dimensionality Reduction)
# ==============================================================================
def pca_analysis(df: pd.DataFrame):
"""PCA analysis trên product dimensions."""
print("\n" + "=" * 70)
print(" PCA - DIMENSIONALITY REDUCTION")
print("=" * 70)
    # Select numeric features
features = ['price', 'freight_value', 'product_weight_g',
'product_length_cm', 'product_height_cm', 'product_width_cm',
'delivery_days']
available_features = [f for f in features if f in df.columns]
data = df[available_features].dropna()
print(f" Features: {available_features}")
print(f" Samples: {len(data)}")
# Standardize
scaler = StandardScaler()
data_scaled = scaler.fit_transform(data)
# PCA
pca = PCA()
pca_result = pca.fit_transform(data_scaled)
# Explained variance
print(f"\n Explained Variance Ratio:")
cumulative = 0
for i, (var, cum_var) in enumerate(zip(
pca.explained_variance_ratio_,
np.cumsum(pca.explained_variance_ratio_)
)):
marker = " ← 95% reached" if cum_var >= 0.95 and cumulative < 0.95 else ""
print(f" PC{i+1}: {var:.4f} ({var*100:.1f}%) "
f"Cumulative: {cum_var:.4f} ({cum_var*100:.1f}%){marker}")
cumulative = cum_var
# Determine number of components for 95% variance
n_components_95 = np.argmax(np.cumsum(pca.explained_variance_ratio_) >= 0.95) + 1
print(f"\n → {n_components_95} components explain 95%+ variance "
f"(reduced from {len(available_features)} features)")
# Component loadings
print(f"\n Component Loadings (first 3 PCs):")
loadings = pd.DataFrame(
pca.components_[:3].T,
index=available_features,
columns=[f'PC{i+1}' for i in range(3)]
).round(3)
print(loadings.to_string())
# Visualization
fig, axes = plt.subplots(1, 2, figsize=(14, 5))
# Scree plot
axes[0].bar(range(1, len(pca.explained_variance_ratio_) + 1),
pca.explained_variance_ratio_, alpha=0.7, color='#3498db', label='Individual')
axes[0].plot(range(1, len(pca.explained_variance_ratio_) + 1),
np.cumsum(pca.explained_variance_ratio_), 'ro-', label='Cumulative')
axes[0].axhline(y=0.95, color='green', linestyle='--', label='95% threshold')
axes[0].set_xlabel('Principal Component')
axes[0].set_ylabel('Explained Variance Ratio')
axes[0].set_title('PCA Scree Plot')
axes[0].legend()
# 2D scatter plot (PC1 vs PC2)
scatter = axes[1].scatter(pca_result[:, 0], pca_result[:, 1],
c=data['price'].values, cmap='viridis',
alpha=0.5, s=10)
axes[1].set_xlabel(f'PC1 ({pca.explained_variance_ratio_[0]*100:.1f}%)')
axes[1].set_ylabel(f'PC2 ({pca.explained_variance_ratio_[1]*100:.1f}%)')
axes[1].set_title('PCA: PC1 vs PC2 (colored by price)')
plt.colorbar(scatter, ax=axes[1], label='Price')
plt.tight_layout()
plt.savefig('pca_analysis.png', dpi=150, bbox_inches='tight')
print(f"\n [OK] Saved: pca_analysis.png")
plt.close()
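# pca_analysis() fits all components and counts toward 95% manually; PCA also
# accepts a float n_components as a variance target. A sketch, with a
# reconstruction-error check via inverse_transform:
def pca_reduce(data_scaled: np.ndarray, variance: float = 0.95) -> np.ndarray:
    """Sketch: keep the fewest components that explain `variance` of the data."""
    pca = PCA(n_components=variance)  # float in (0, 1) = explained-variance target
    reduced = pca.fit_transform(data_scaled)
    reconstructed = pca.inverse_transform(reduced)
    mse = np.mean((data_scaled - reconstructed) ** 2)  # information lost by reducing
    print(f" Kept {pca.n_components_} components, reconstruction MSE={mse:.4f}")
    return reduced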
# ==============================================================================
# PART 6: Feature Engineering
# ==============================================================================
def feature_engineering(df: pd.DataFrame) -> pd.DataFrame:
"""Feature Engineering cho Olist dataset."""
print("\n" + "=" * 70)
print(" FEATURE ENGINEERING")
print("=" * 70)
df_feat = df.copy()
new_features = []
# 1. Price features
if 'price' in df_feat.columns and 'freight_value' in df_feat.columns:
df_feat['freight_ratio'] = (df_feat['freight_value'] / df_feat['price']).round(4)
df_feat['total_value'] = df_feat['price'] + df_feat['freight_value']
df_feat['is_free_shipping'] = (df_feat['freight_value'] == 0).astype(int)
new_features.extend(['freight_ratio', 'total_value', 'is_free_shipping'])
# 2. Price binning
if 'price' in df_feat.columns:
df_feat['price_category'] = pd.qcut(
df_feat['price'], q=5,
labels=['very_low', 'low', 'medium', 'high', 'very_high'],
duplicates='drop'
)
new_features.append('price_category')
# 3. Product size features
size_cols = ['product_length_cm', 'product_height_cm', 'product_width_cm']
if all(c in df_feat.columns for c in size_cols):
df_feat['product_volume'] = (
df_feat['product_length_cm'] *
df_feat['product_height_cm'] *
df_feat['product_width_cm']
)
new_features.append('product_volume')
if 'product_weight_g' in df_feat.columns:
df_feat['product_density'] = (
df_feat['product_weight_g'] / df_feat['product_volume'].replace(0, np.nan)
).round(4)
df_feat['is_heavy'] = (df_feat['product_weight_g'] > 5000).astype(int)
new_features.extend(['product_density', 'is_heavy'])
# 4. Delivery features
if 'delivery_days' in df_feat.columns:
df_feat['delivery_category'] = pd.cut(
df_feat['delivery_days'],
bins=[0, 3, 7, 14, 30, float('inf')],
labels=['express', 'fast', 'normal', 'slow', 'very_slow']
)
df_feat['is_late'] = (df_feat['delivery_days'] > 14).astype(int)
new_features.extend(['delivery_category', 'is_late'])
# 5. Review features
if 'review_score' in df_feat.columns:
df_feat['is_positive'] = (df_feat['review_score'] >= 4).astype(int)
df_feat['is_negative'] = (df_feat['review_score'] <= 2).astype(int)
new_features.extend(['is_positive', 'is_negative'])
# 6. State-based features
if 'customer_state' in df_feat.columns:
state_region = {
'SP': 'Southeast', 'RJ': 'Southeast', 'MG': 'Southeast', 'ES': 'Southeast',
'PR': 'South', 'SC': 'South', 'RS': 'South',
'BA': 'Northeast', 'PE': 'Northeast', 'CE': 'Northeast',
'DF': 'Central-West', 'GO': 'Central-West', 'MT': 'Central-West',
'AM': 'North', 'PA': 'North',
}
df_feat['region'] = df_feat['customer_state'].map(state_region).fillna('Other')
new_features.append('region')
print(f" New features created: {len(new_features)}")
for feat in new_features:
dtype = df_feat[feat].dtype
nunique = df_feat[feat].nunique()
sample = df_feat[feat].head(3).tolist()
print(f" {feat}: dtype={dtype}, unique={nunique}, sample={sample}")
print(f"\n Shape: {df.shape}{df_feat.shape} (+{len(new_features)} features)")
return df_feat
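# The engineered categorical features (price_category, delivery_category,
# region) need encoding before most ML models. A minimal one-hot sketch using
# pandas; drop_first=True drops one redundant dummy per feature:
def encode_categories(df: pd.DataFrame) -> pd.DataFrame:
    """Sketch: one-hot encode the engineered categorical features."""
    cat_cols = [c for c in ['price_category', 'delivery_category', 'region']
                if c in df.columns]
    return pd.get_dummies(df, columns=cat_cols, drop_first=True)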
# ==============================================================================
# MAIN
# ==============================================================================
if __name__ == '__main__':
print("=" * 70)
print(" LAB 4: DATA PREPROCESSING")
print(" BIM5021 - Nha kho du lieu va Tich hop")
print("=" * 70)
# 1. Create sample data
df = create_sample_olist()
# 2. Assess quality
assess_data_quality(df)
# 3. Clean data
df_clean = clean_data(df)
# 4. Normalize comparison
normalize_comparison(df_clean)
# 5. PCA
pca_analysis(df_clean)
# 6. Feature Engineering
df_features = feature_engineering(df_clean)
print("\n" + "=" * 70)
print(" HOAN THANH LAB 4!")
print(" Files: normalization_comparison.png, pca_analysis.png")
print("=" * 70)