import pandas as pd
import numpy as np
import time
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from joblib import Parallel, delayed

# --- Data loading and preprocessing -------------------------------------
# Read the raw cell-tower export, normalize the column names, drop the
# redundant index column, and discard rows missing any clustering feature.
file_path = 'Asia towers.csv'  # Update with the actual path
data = pd.read_csv(file_path)

column_names = [
    'Index', 'Radio', 'MCC', 'MNC', 'TAC', 'CID', 'Unit',
    'Longitude', 'Latitude', 'Range', 'Samples', 'Changeable',
    'Created', 'Updated', 'AverageSignal', 'Country', 'Network', 'Continent',
]
data.columns = column_names
data = data.drop(columns=['Index'])

feature_columns = ['Latitude', 'Longitude', 'AverageSignal', 'Range']
data = data.dropna(subset=feature_columns)

# Restrict to the numeric features used for clustering.
features = data[feature_columns]

# Zero-mean / unit-variance scaling so no single feature dominates the
# Euclidean distances K-means relies on.
scaler = StandardScaler()
scaled_features = scaler.fit_transform(features)

def kmeans_clustering(n_clusters, features, random_state=42):
    """Fit a K-means model on the given feature matrix.

    Parameters
    ----------
    n_clusters : int
        Number of clusters to form.
    features : array-like of shape (n_samples, n_features)
        Feature matrix to cluster (expected to be pre-scaled).
    random_state : int, optional
        Seed for centroid initialization; defaults to 42 so repeated
        runs (and the serial/parallel comparison below) are reproducible.

    Returns
    -------
    KMeans
        The fitted estimator.
    """
    kmeans = KMeans(n_clusters=n_clusters, random_state=random_state)
    kmeans.fit(features)
    return kmeans

# Evaluate performance in serial.
# perf_counter() is a monotonic high-resolution clock intended for
# interval timing; time.time() is wall-clock and can jump (e.g. NTP
# adjustments), which would corrupt the benchmark.
start_time = time.perf_counter()
kmeans_serial = kmeans_clustering(5, scaled_features)
serial_duration = time.perf_counter() - start_time
print(f"Serial K-means clustering took {serial_duration:.2f} seconds")

# Evaluate performance with parallel processing
def evaluate_parallel(n_jobs):
    start_time = time.time()
    parallel_kmeans = Parallel(n_jobs=n_jobs)(delayed(kmeans_clustering)(5, scaled_features) for _ in range(1))
    parallel_duration = time.time() - start_time
    return parallel_duration

# Benchmark the parallel path across several worker counts and record
# each elapsed time keyed by its worker count.
n_jobs_list = [1, 2, 4, 8]
parallel_durations = {}

for n_jobs in n_jobs_list:
    parallel_durations[n_jobs] = evaluate_parallel(n_jobs)
    duration = parallel_durations[n_jobs]
    print(f"Parallel K-means clustering with {n_jobs} jobs took {duration:.2f} seconds")

# Print a final side-by-side summary of serial vs. parallel timings.
print(f"\nSerial Duration: {serial_duration:.2f} seconds")
for n_jobs in n_jobs_list:
    duration = parallel_durations[n_jobs]
    print(f"Parallel Duration with {n_jobs} jobs: {duration:.2f} seconds")
