#!/usr/bin/env python3
import csv
import hashlib
import json
import os
import re
import shutil
from pathlib import Path

# Resolve the project layout relative to this script's location.
# NOTE(review): 'fianl_answer' looks like a typo for 'final_answer' —
# confirm it matches the actual directory name on disk before changing it.
INPUT_DIR = Path(__file__).resolve().parents[1] / 'fianl_answer'
OUT_DIR = INPUT_DIR / 'converted_bigint'
OUT_DIR.mkdir(parents=True, exist_ok=True)

# BIGINT range: -2^63 to 2^63-1, i.e., -9223372036854775808 to 9223372036854775807
# We'll map to positive range only: 0 to 9223372036854775807 (19 digits max)
MAX_BIGINT = 9223372036854775807

# Mapping for long numbers -> hashed BIGINT
# Keys: original digit strings too large for BIGINT; values: hashed decimal strings.
long_to_bigint = {}
# Collision records (dicts with 'original1'/'original2'/'hashed_bigint') filled by to_bigint().
bigint_collisions = []

# helper to parse the properties column which may look like: {"original_msisdn": "abcd"}
# helper to parse the properties column which may look like: {"original_msisdn": "abcd"}
def parse_properties(s: str) -> dict:
    """Parse the CSV 'properties' column into a dict.

    The column should hold JSON like {"original_msisdn": "abcd"}, but may
    arrive with CSV-style doubled quotes or otherwise mangled. Always returns
    a dict (possibly empty) and never raises, so callers can safely use
    ``.get()`` on the result — the original version could return a non-dict
    (list/str/number) when the cell held valid but non-object JSON.
    """
    if s is None:
        return {}
    s = s.strip()
    # First attempt: the csv module usually unescapes doubled quotes already.
    try:
        parsed = json.loads(s)
        if isinstance(parsed, dict):
            return parsed
    except Exception:
        pass
    # Second attempt: undo CSV-style quote doubling ("" -> ") and retry.
    try:
        parsed = json.loads(s.replace('""', '"'))
        if isinstance(parsed, dict):
            return parsed
    except Exception:
        pass
    # Last resort: crude regex extraction of the one key we care about.
    m = re.search(r'original_msisdn"\s*:\s*"([0-9a-fA-F]+)"', s)
    if m:
        return {"original_msisdn": m.group(1)}
    return {}

def to_bigint(num_str: str) -> str:
    """
    Convert a numeric string to a BIGINT-compatible decimal string.

    Non-digit characters are stripped first. If the remaining digits fit in a
    signed 64-bit integer they are returned as-is; otherwise the number is
    hashed (SHA-256 truncated to 63 bits, so it stays positive) and the
    original -> hashed mapping is recorded in ``long_to_bigint``. Strings with
    no digits at all are returned unchanged.
    """
    digits = re.sub(r'[^0-9]', '', num_str)
    if not digits:
        # Nothing numeric to convert; pass the value through untouched.
        return num_str

    # Fits in BIGINT (<= 19 digits and <= 2^63-1): no hashing needed.
    # int() cannot fail here — `digits` is a non-empty all-digit string —
    # so the original bare `except:` was dead code and has been removed.
    if len(digits) <= 19 and int(digits) <= MAX_BIGINT:
        return digits

    # Too large for BIGINT: reuse an existing mapping if we have one.
    if digits in long_to_bigint:
        return long_to_bigint[digits]

    # Hash to a positive 63-bit integer: first 8 bytes of SHA-256, top bit masked.
    taken = set(long_to_bigint.values())
    h = hashlib.sha256(digits.encode('utf-8')).digest()
    hashed_str = str(int.from_bytes(h[:8], 'big') & 0x7FFFFFFFFFFFFFFF)

    # Resolve (rare) collisions by re-hashing with an incrementing salt until
    # the value is unique. The original single salted retry could itself
    # collide with an existing value; this loop guarantees uniqueness.
    salt = 0
    while hashed_str in taken:
        bigint_collisions.append({
            'original1': next(k for k, v in long_to_bigint.items() if v == hashed_str),
            'original2': digits,
            'hashed_bigint': hashed_str,
        })
        salt += 1
        h = hashlib.sha256(f'{digits}_collision{salt}'.encode('utf-8')).digest()
        hashed_str = str(int.from_bytes(h[:8], 'big') & 0x7FFFFFFFFFFFFFFF)

    long_to_bigint[digits] = hashed_str
    return hashed_str

# 1) Process user_v.csv: convert MSISDN to BIGINT and rename properties key
# 1) Process user_v.csv: convert MSISDN to BIGINT and rename the
#    'original_msisdn' properties key to 'MSISDN'. Unlike the original code,
#    any other keys present in properties are preserved rather than discarded,
#    matching the stated intent of "rename key (keep value the same)".
user_v_in = INPUT_DIR / 'user_v.csv'
user_v_out = OUT_DIR / 'user_v.csv'
with user_v_in.open('r', encoding='utf-8', newline='') as f_in, user_v_out.open('w', encoding='utf-8', newline='') as f_out:
    reader = csv.DictReader(f_in)
    # Defensive fallback in case the input file has no header row.
    fieldnames = reader.fieldnames if reader.fieldnames else ['MSISDN', 'properties']
    writer = csv.DictWriter(f_out, fieldnames=fieldnames)
    writer.writeheader()
    for row in reader:
        # Convert MSISDN to a BIGINT-compatible value.
        ms = row.get('MSISDN', '').strip()
        if ms:
            row['MSISDN'] = to_bigint(ms)

        # Rename 'original_msisdn' -> 'MSISDN' inside properties, keeping
        # every other key/value pair intact.
        props = parse_properties(row.get('properties', ''))
        new_props = {('MSISDN' if k == 'original_msisdn' else k): v for k, v in props.items()}
        row['properties'] = json.dumps(new_props, ensure_ascii=False)
        writer.writerow(row)

print("Processed user_v.csv -> converted_bigint/user_v.csv (BIGINT-compatible)")

# 2) Process user.csv: convert MSISDN to BIGINT-compatible format
user_in = INPUT_DIR / 'user.csv'
user_out = OUT_DIR / 'user.csv'
with user_in.open('r', encoding='utf-8', newline='') as f_in, user_out.open('w', encoding='utf-8', newline='') as f_out:
    reader = csv.DictReader(f_in)
    fieldnames = reader.fieldnames
    writer = csv.DictWriter(f_out, fieldnames=fieldnames)
    writer.writeheader()
    for row in reader:
        ms = row.get('MSISDN','').strip()
        if ms:
            row['MSISDN'] = to_bigint(ms)
        writer.writerow(row)

print('Processed user.csv -> converted_bigint/user.csv (BIGINT-compatible)')

# 3) tv.csv: copy unchanged as requested.
# The original code assigned tv_in/tv_out twice (dead duplicate lines) and
# round-tripped the file through the csv module, which can silently alter
# quoting and line endings. A byte-for-byte copy is what "unchanged" means.
tv_in = INPUT_DIR / 'tv.csv'
tv_out = OUT_DIR / 'tv.csv'
shutil.copyfile(tv_in, tv_out)

print('Copied tv.csv unchanged (not processed)')

# 4) Process call_edges.csv: convert the MSISDN and OPP_MSISDN columns to
#    BIGINT-compatible values; the third (properties) column passes through.
call_in = INPUT_DIR / 'call_edges.csv'
call_out = OUT_DIR / 'call_edges.csv'
with call_in.open('r', encoding='utf-8', newline='') as f_in, call_out.open('w', encoding='utf-8', newline='') as f_out:
    reader = csv.reader(f_in)
    writer = csv.writer(f_out)

    # Repair a truncated two-column header (e.g. 'c,properties') if present.
    header = next(reader)
    if len(header) == 2 and header[0] in ['c', 'MSISDN']:
        header = ['MSISDN', 'OPP_MSISDN', 'properties']
    writer.writerow(header)

    for row in reader:
        if len(row) >= 2:
            # First two columns are the phone numbers to convert.
            row[:2] = [to_bigint(cell.strip()) for cell in row[:2]]
        writer.writerow(row)

print('Processed call_edges.csv -> converted_bigint/call_edges.csv (BIGINT-compatible)')

# 5) Persist the long-number -> BIGINT hash mapping, if any were produced.
if long_to_bigint:
    mapping_path = OUT_DIR / 'bigint_mapping.csv'
    with mapping_path.open('w', encoding='utf-8', newline='') as mf:
        mw = csv.writer(mf)
        mw.writerow(['original_number', 'bigint_value', 'length'])
        mw.writerows((number, value, len(number)) for number, value in long_to_bigint.items())
    print(f'Wrote BIGINT mapping table: {mapping_path} ({len(long_to_bigint)} entries)')
else:
    print('No long numbers needed hashing (all fit in BIGINT range)')

# 6) Persist a collision report when any hash collisions occurred.
if bigint_collisions:
    collision_path = OUT_DIR / 'bigint_collisions.csv'
    with collision_path.open('w', encoding='utf-8', newline='') as cf:
        cw = csv.DictWriter(cf, fieldnames=['original1', 'original2', 'hashed_bigint'])
        cw.writeheader()
        cw.writerows(bigint_collisions)
    print(f'WARNING: Hash collisions detected: {collision_path} ({len(bigint_collisions)} collisions)')

# Final human-readable summary of everything written.
print('\nAll done. Outputs written to:')
print(OUT_DIR)
print('\nSummary:')
print('  - user_v.csv: properties key renamed to MSISDN')
print('  - user.csv: MSISDN converted to BIGINT')
print('  - tv.csv: copied unchanged')
print('  - call_edges.csv: MSISDN/OPP_MSISDN converted to BIGINT')
print(f'  - bigint_mapping.csv: {len(long_to_bigint)} long numbers mapped to BIGINT hashes')
if bigint_collisions:
    print(f'  - bigint_collisions.csv: {len(bigint_collisions)} hash collisions (resolved with salt)')
print('\nYou can now import these CSV files into a database with BIGINT columns.')
