#!/usr/bin/env python3
"""
Comprehensive UN General Assembly Resolution Crawler
Crawls ALL UN resolutions from 1946 to present
"""

import sqlite3
import requests
from bs4 import BeautifulSoup
import re
import time
import csv
from urllib.parse import urljoin, quote
import json
from datetime import datetime
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
import threading

class ComprehensiveUNResolutionCrawler:
    def __init__(self, db_path="un_resolutions_complete.db"):
        """Configure crawl parameters, the HTTP session, and the database.

        Args:
            db_path: Path of the SQLite database file to create or reuse.
        """
        self.db_path = db_path
        self.base_url = "https://www.un.org"
        # Votes whose losing side is smaller than this are "lopsided".
        self.voting_threshold = 10
        # Seconds between requests, and retry budget per page fetch.
        self.request_delay = 2
        self.max_retries = 3
        self.session_lock = threading.Lock()
        # Browser-like User-Agent: some UN endpoints reject default clients.
        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        })
        self.setup_database()

    def setup_database(self):
        """Create SQLite database and tables"""
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()

        cursor.execute('''
            CREATE TABLE IF NOT EXISTS resolutions (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                resolution_number TEXT UNIQUE,
                title TEXT,
                date TEXT,
                session_number INTEGER,
                topic_category TEXT,
                votes_for INTEGER,
                votes_against INTEGER,
                votes_abstain INTEGER,
                votes_absent INTEGER,
                is_lopsided BOOLEAN,
                minority_countries TEXT,
                minority_count INTEGER,
                majority_side TEXT,
                url TEXT,
                voting_record TEXT,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        ''')

        cursor.execute('''
            CREATE TABLE IF NOT EXISTS crawl_log (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                session INTEGER,
                year INTEGER,
                resolutions_found INTEGER,
                success BOOLEAN,
                error_message TEXT,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        ''')

        conn.commit()
        conn.close()

    def get_session_range(self, last_session=78):
        """Return the range of UN GA session numbers to crawl.

        The General Assembly first met in 1946 (session 1); session 78 was
        current when this was written. The upper bound is now a parameter so
        future sessions can be included without editing this method; the
        default preserves the original behavior.

        Args:
            last_session: Highest session number to include (default 78).

        Returns:
            range covering sessions 1..last_session inclusive.
        """
        return range(1, last_session + 1)

    def get_resolution_urls_for_session(self, session_num):
        """Collect candidate resolution URLs for one GA session.

        Several known UN site layouts are tried in order; the first layout
        that yields any links wins and the rest are skipped.

        Args:
            session_num: GA session number.

        Returns:
            De-duplicated list of resolution URLs (possibly empty).
        """
        candidates = [
            f"https://www.un.org/en/ga/{session_num}/resolutions",
            f"https://documents.un.org/prod/ods.nsf/home.xsp?session={session_num}",
            f"https://digitallibrary.un.org/record/{session_num}",
            f"https://www.un.org/en/ga/documents/resolutions?session={session_num}"
        ]

        collected = []
        for candidate in candidates:
            try:
                print("Trying URL: {}".format(candidate))
                page = self.session.get(candidate, timeout=30)
                if page.status_code == 200:
                    parsed = BeautifulSoup(page.content, 'html.parser')
                    links = self.extract_resolution_links(parsed, session_num)
                    if links:
                        collected.extend(links)
                        break
            except Exception as exc:
                print("Error accessing {}: {}".format(candidate, exc))
                continue

        # set() drops duplicate URLs gathered across patterns.
        return list(set(collected))

    def extract_resolution_links(self, soup, session_num):
        """Pull resolution links out of a parsed index page.

        Three link shapes are recognized: hrefs containing /resolution(s)/,
        anchor text that looks like a UN resolution symbol, and UN Digital
        Library record URLs.

        Args:
            soup: BeautifulSoup document for an index page.
            session_num: Session number (currently unused; kept for
                interface parity with callers).

        Returns:
            List of URLs (may contain duplicates; callers de-duplicate).
        """
        found = []

        # Relative hrefs mentioning /resolution(s)/ resolve against un.org.
        for anchor in soup.find_all('a', href=re.compile(r'/resolution/|/resolutions/', re.IGNORECASE)):
            target = anchor.get('href')
            if target:
                found.append(urljoin(self.base_url, target))

        # Anchors whose visible text is a resolution symbol (e.g. A/RES/76/1).
        for anchor in soup.find_all('a', text=re.compile(r'A/RES/\d+|A/\d+/L\.\d+|A/\d+/R\.\d+', re.IGNORECASE)):
            target = anchor.get('href')
            if target:
                found.append(urljoin(self.base_url, target))

        # Digital Library links are already absolute, so keep them verbatim.
        for anchor in soup.find_all('a', href=re.compile(r'digitallibrary\.un\.org/record/\d+', re.IGNORECASE)):
            target = anchor.get('href')
            if target:
                found.append(target)

        return found

    def get_un_odb_data(self, session_num):
        """Query the UN Official Document System for a session's resolutions.

        Args:
            session_num: GA session number inserted into the A/RES/ query.

        Returns:
            List of document-view URLs found; empty on any failure (errors
            are printed, not raised).
        """
        ods_home = "https://documents.un.org/prod/ods.nsf/home.xsp"
        documents = []

        try:
            query_url = f"{ods_home}?query=A/RES/{session_num}&sort=date&order=desc"
            reply = self.session.get(query_url, timeout=30)

            if reply.status_code == 200:
                page = BeautifulSoup(reply.content, 'html.parser')
                # ODS exposes documents through view.xsp?documentId=... links.
                for anchor in page.find_all('a', href=re.compile(r'view\.xsp\?documentId=', re.IGNORECASE)):
                    target = anchor.get('href')
                    if target:
                        documents.append(urljoin(ods_home, target))

        except Exception as exc:
            print("Error accessing UN ODB: {}".format(exc))

        return documents

    def get_voting_data_from_page(self, url):
        """Download a resolution page and run voting-data parsers over it.

        Parsers run from most to least structured; the first one returning
        data wins. Failed downloads or empty parses are retried up to
        self.max_retries times, with a doubled delay after an exception.

        Args:
            url: Resolution page URL.

        Returns:
            Votes dict from one of the parse_* methods, or None when every
            attempt came up empty.
        """
        parsers = (
            self.parse_structured_voting_table,
            self.parse_voting_summary,
            self.parse_voting_text,
            self.parse_document_text,
        )

        result = None
        for attempt in range(self.max_retries):
            try:
                page = self.session.get(url, timeout=30)
                page.raise_for_status()
                soup = BeautifulSoup(page.content, 'html.parser')

                for parser in parsers:
                    result = parser(soup)
                    if result:
                        break

                if result:
                    break

                # Nothing parsed this attempt: pause, then refetch the page.
                time.sleep(self.request_delay)

            except Exception as exc:
                print("Attempt {} failed for {}: {}".format(attempt + 1, url, exc))
                if attempt < self.max_retries - 1:
                    time.sleep(self.request_delay * 2)
                continue

        return result

    def parse_structured_voting_table(self, soup):
        """Parse vote tallies out of structured HTML voting tables.

        Scans every <table> for rows whose first cell labels a vote type and
        whose second cell holds a count; an optional third cell may carry the
        country list for that vote type.

        Args:
            soup: BeautifulSoup document for a resolution page.

        Returns:
            dict with 'for'/'against'/'abstain'/'absent' counts plus a
            'countries' sub-dict of per-side name lists, or None when no
            table yielded any votes.
        """
        # Synonyms are grouped per vote type so a label like "yes" only
        # updates the 'for' tally. The original used one flat synonym list,
        # which credited every vote type whenever any synonym appeared.
        synonyms = {
            'for': ('for', 'yes', 'favour', 'yea'),
            'against': ('against', 'no', 'oppose', 'nay'),
            'abstain': ('abstain', 'abstention'),
            'absent': ('absent', 'not voting'),
        }

        for table in soup.find_all('table'):
            votes = {'for': 0, 'against': 0, 'abstain': 0, 'absent': 0,
                     'countries': {'for': [], 'against': [], 'abstain': [], 'absent': []}}

            for row in table.find_all('tr'):
                cells = row.find_all(['td', 'th'])
                if len(cells) < 2:
                    continue
                label = cells[0].get_text(strip=True).lower()
                value_text = cells[1].get_text(strip=True)

                for vote_type, words in synonyms.items():
                    if any(word in label for word in words):
                        match = re.search(r'(\d+)', value_text)
                        if match:
                            votes[vote_type] = int(match.group(1))

                            # A third cell, when present, lists the countries.
                            if len(cells) > 2:
                                country_text = cells[2].get_text(strip=True)
                                votes['countries'][vote_type] = self.parse_country_list(country_text)

            # Sum only the numeric tallies. The original summed
            # votes.values(), which raised TypeError because the
            # 'countries' value is a dict, so no table result was ever
            # returned.
            if votes['for'] + votes['against'] + votes['abstain'] + votes['absent'] > 0:
                return votes

        return None

    def parse_voting_summary(self, soup):
        """Parse aggregate vote counts from the page's full text.

        Looks for phrases such as "120 in favour" / "7 against" and records
        the first number found for each vote type.

        Args:
            soup: BeautifulSoup document (only get_text() is used).

        Returns:
            Votes dict (see parse_structured_voting_table; country lists
            stay empty here) or None when no counts were found.
        """
        text = soup.get_text()

        # One regex per vote type; the first match in the text wins.
        patterns = {
            'for': r'(\d+)\s*(?:in favour|for|yes|yea|adopted)',
            'against': r'(\d+)\s*(?:against|no|nay|opposed)',
            'abstain': r'(\d+)\s*(?:abstaining|abstention|abstain)',
            'absent': r'(\d+)\s*(?:absent|not voting|did not vote)'
        }

        votes = {'for': 0, 'against': 0, 'abstain': 0, 'absent': 0,
                 'countries': {'for': [], 'against': [], 'abstain': [], 'absent': []}}

        for vote_type, pattern in patterns.items():
            matches = re.findall(pattern, text, re.IGNORECASE)
            if matches:
                votes[vote_type] = int(matches[0])

        # Sum only the numeric fields: the original sum(votes.values())
        # raised TypeError on the 'countries' dict, so this method always
        # crashed instead of returning.
        total = votes['for'] + votes['against'] + votes['abstain'] + votes['absent']
        return votes if total > 0 else None

    def parse_voting_text(self, soup):
        """Parse vote counts and country lists from free-form paragraphs.

        Only paragraphs/divs mentioning voting, adoption, or results are
        scanned; counts and country lists are extracted per vote type.

        Args:
            soup: BeautifulSoup document (find_all(['p', 'div']) is used).

        Returns:
            Votes dict or None when no counts were found.
        """
        votes = {'for': 0, 'against': 0, 'abstain': 0, 'absent': 0,
                 'countries': {'for': [], 'against': [], 'abstain': [], 'absent': []}}

        for paragraph in soup.find_all(['p', 'div']):
            text = paragraph.get_text(strip=True)

            # Skip paragraphs that do not talk about the vote at all.
            if not re.search(r'voting|adopted|result', text, re.IGNORECASE):
                continue

            for vote_type in ['for', 'against', 'abstain', 'absent']:
                count_matches = re.findall(r'(\d+)\s*(?:{})'.format(vote_type), text, re.IGNORECASE)
                if count_matches:
                    votes[vote_type] = int(count_matches[0])

                # Country-list patterns are built INSIDE this loop: the
                # original built them after the loop ended, so vote_type was
                # always the stale final value 'absent'.
                country_patterns = [
                    r'countries voting (?:{}):?\s*([^.]*)'.format(vote_type),
                    r'list of countries voting (?:{}):?\s*([^.]*)'.format(vote_type),
                    r'following countries voted (?:{}):?\s*([^.]*)'.format(vote_type)
                ]
                for pattern in country_patterns:
                    matches = re.findall(pattern, text, re.IGNORECASE)
                    if matches:
                        votes['countries'][vote_type] = self.parse_country_list(matches[0])

        # Numeric-only total; summing votes.values() (as before) raised
        # TypeError because 'countries' holds a dict.
        total = votes['for'] + votes['against'] + votes['abstain'] + votes['absent']
        return votes if total > 0 else None

    def parse_document_text(self, soup):
        """Parse classic "adopted by X votes to Y" phrasings from page text.

        Args:
            soup: BeautifulSoup document (only get_text() is used).

        Returns:
            Votes dict (country lists always empty here) or None when no
            pattern matched.
        """
        text = soup.get_text()

        # Each pattern captures (for, against, abstain) in order.
        vote_record_patterns = [
            r'adopted by (\d+) votes to (\d+), with (\d+) abstentions?',
            r'adopted by a vote of (\d+) to (\d+), with (\d+) abstentions?',
            r'votes in favour: (\d+), against: (\d+), abstentions: (\d+)',
            r'recorded vote: (\d+) in favour, (\d+) against, (\d+) abstaining'
        ]

        votes = {'for': 0, 'against': 0, 'abstain': 0, 'absent': 0,
                 'countries': {'for': [], 'against': [], 'abstain': [], 'absent': []}}

        for pattern in vote_record_patterns:
            matches = re.findall(pattern, text, re.IGNORECASE)
            if matches:
                vote_counts = matches[0]
                votes['for'] = int(vote_counts[0])
                votes['against'] = int(vote_counts[1])
                votes['abstain'] = int(vote_counts[2]) if len(vote_counts) > 2 else 0
                break

        # Sum only the numeric tallies; the original sum(votes.values())
        # raised TypeError because 'countries' is a dict.
        total = votes['for'] + votes['against'] + votes['abstain'] + votes['absent']
        return votes if total > 0 else None

    def parse_country_list(self, country_text):
        """Split free text into a cleaned list of country names.

        Args:
            country_text: Raw text listing countries separated by commas,
                semicolons, newlines, or bullet characters.

        Returns:
            Up to 50 cleaned country names. NOTE: the cleaning strips
            "Republic of"-style prefixes/suffixes, so some official names
            come back shortened (e.g. "Republic of Korea" -> "Korea").
        """
        countries = []

        if not country_text:
            return countries

        # Split on the first separator that appears in the text.
        separators = [',', ';', '\n', '•', '·']
        for separator in separators:
            if separator in country_text:
                countries = [c.strip() for c in country_text.split(separator) if c.strip()]
                break
        else:
            # No separator at all: the text is one single country name.
            # (The original left `countries` empty here, silently dropping
            # one-country lists.)
            countries = [country_text]

        # Clean up country names
        cleaned_countries = []
        for country in countries:
            # Remove common prefixes/suffixes and formatting
            country = re.sub(r'^(The\s+|Republic\s+of\s+|Democratic\s+Republic\s+of\s+|People\'s\s+Republic\s+of\s+)', '', country, flags=re.IGNORECASE)
            country = re.sub(r'(\s+Republic|\s+State|\s+Kingdom|\s+Federation|\s+Emirates)$', '', country, flags=re.IGNORECASE)
            country = re.sub(r'^\d+\.?\s*', '', country)  # Remove numbering
            country = re.sub(r'\([^)]*\)', '', country)  # Remove parenthetical content
            country = country.strip()

            # Drop connective words and fragments too short to be countries.
            if country and len(country) > 2 and not country.lower() in ['and', 'or', 'the', 'of', 'etc']:
                cleaned_countries.append(country)

        return cleaned_countries[:50]  # Limit to reasonable number

    def classify_topic(self, title, content=""):
        """Classify a resolution into one broad topic category.

        Args:
            title: Resolution title.
            content: Optional extra text included in the keyword match.

        Returns:
            The first category (in declaration order) with a keyword that
            appears as a substring of the lowercase combined text, else
            'Other'.
        """
        text = (title + " " + content).lower()

        # Ordered mapping: earlier categories win when several match.
        # Cleanup vs. the original: the 'Other': 'other' sentinel entry is
        # gone (the fallback below already returns 'Other', so matching the
        # word "other" changed nothing), and the duplicated 'conflict'
        # keyword is listed once.
        topic_keywords = {
            'Peace & Security': ['peace', 'security', 'conflict', 'war', 'military', 'terrorism', 'disarmament', 'weapon', 'nuclear', 'arms', 'aggression', 'occupation'],
            'Human Rights': ['human rights', 'refugee', 'asylum', 'discrimination', 'torture', 'gender', 'women', 'children', 'minority', 'indigenous', 'racism', 'freedom'],
            'Development': ['development', 'poverty', 'education', 'health', 'sustainable', 'millennium', 'economic', 'social', 'cooperation', 'assistance'],
            'Environment': ['climate', 'environment', 'biodiversity', 'pollution', 'sustainability', 'desertification', 'ozone', 'carbon', 'emissions', 'renewable'],
            'International Law': ['court', 'treaty', 'convention', 'law', 'legal', 'jurisdiction', 'justice', 'charter', 'protocol', 'agreement'],
            'Humanitarian': ['humanitarian', 'aid', 'relief', 'disaster', 'emergency', 'food', 'water', 'shelter', 'medical'],
            'Political': ['political', 'democracy', 'election', 'governance', 'institution', 'reform', 'administration'],
            'Economic': ['economic', 'finance', 'trade', 'investment', 'debt', 'currency', 'bank', 'monetary', 'fiscal'],
            'Social': ['social', 'culture', 'education', 'health', 'family', 'youth', 'aging', 'disability', 'employment'],
            'Administrative': ['budget', 'administration', 'programme', 'planning', 'coordination', 'management', 'headquarters'],
        }

        for category, keywords in topic_keywords.items():
            if any(keyword in text for keyword in keywords):
                return category

        return 'Other'

    def is_lopsided_vote(self, votes):
        """Return True when the losing side of a vote is smaller than
        self.voting_threshold.

        Args:
            votes: Dict with integer 'for' and 'against' counts.
        """
        losing_side = min(votes['for'], votes['against'])
        return losing_side < self.voting_threshold

    def get_minority_countries(self, votes):
        """Return (country list, side label) for the smaller voting bloc.

        Ties go to 'against', matching the original strict comparison.

        Args:
            votes: Dict with 'for'/'against' counts and a 'countries'
                sub-dict of per-side name lists.
        """
        side = 'for' if votes['for'] < votes['against'] else 'against'
        return votes['countries'][side], side

    def extract_resolution_number(self, url):
        """Derive a resolution symbol (e.g. "A/RES/76/123") from a URL.

        Args:
            url: Resolution page URL.

        Returns:
            Normalized "A/RES/..." symbol, or "Unknown" when no pattern
            matches.
        """
        symbol_patterns = (
            r'A/RES/(\d+(?:/\d+)?)',
            r'A/(\d+)/[LR]\.(\d+)',
            r'resolution[/_](\d+(?:[/_]\d+)?)',
            r'document/(\d+(?:/\d+)?)',
        )

        for symbol_pattern in symbol_patterns:
            hits = re.findall(symbol_pattern, url, re.IGNORECASE)
            if not hits:
                continue
            first = hits[0]
            # Two-group patterns yield a (session, number) tuple.
            if isinstance(first, tuple):
                return "A/RES/{}/{}".format(first[0], first[1])
            return "A/RES/{}".format(first)

        return "Unknown"

    def save_resolution(self, resolution_data):
        """Save resolution data to database"""
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()

        try:
            cursor.execute('''
                INSERT OR REPLACE INTO resolutions (
                    resolution_number, title, date, session_number, topic_category, votes_for, votes_against,
                    votes_abstain, votes_absent, is_lopsided, minority_countries, minority_count,
                    majority_side, url, voting_record
                ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            ''', (
                resolution_data['number'],
                resolution_data['title'],
                resolution_data.get('date', ''),
                resolution_data.get('session', 0),
                resolution_data['topic_category'],
                resolution_data['votes_for'],
                resolution_data['votes_against'],
                resolution_data['votes_abstain'],
                resolution_data.get('votes_absent', 0),
                resolution_data['is_lopsided'],
                resolution_data['minority_countries'],
                resolution_data['minority_count'],
                resolution_data['majority_side'],
                resolution_data['url'],
                resolution_data.get('voting_record', '')
            ))

            conn.commit()
            print("Saved resolution {}: {}".format(resolution_data['number'], resolution_data['title']))

        except Exception as e:
            print("Error saving resolution {}: {}".format(resolution_data['number'], e))
            conn.rollback()

        conn.close()

    def process_resolution(self, url, session_num):
        """Fetch one resolution page, extract its vote data, and persist it.

        Args:
            url: Resolution page URL.
            session_num: GA session number the URL belongs to.

        Returns:
            True when voting data was found and saved; False otherwise.
            (The original fell off the end and returned None when no voting
            data was found; callers only truth-test the result, so the
            explicit False is backward compatible.)
        """
        try:
            voting_data = self.get_voting_data_from_page(url)

            # Guard clause: skip pages where no meaningful tally came back.
            if not voting_data or (voting_data['for'] == 0 and voting_data['against'] == 0):
                return False

            # Basic metadata (title fetch issues a second request).
            title = self.extract_title_from_url(url)
            resolution_number = self.extract_resolution_number(url)
            topic_category = self.classify_topic(title)
            is_lopsided = self.is_lopsided_vote(voting_data)

            minority_countries, majority_side = self.get_minority_countries(voting_data)
            minority_count = len(minority_countries)

            resolution_data = {
                'number': resolution_number,
                'title': title,
                'url': url,
                'session': session_num,
                'topic_category': topic_category,
                'votes_for': voting_data['for'],
                'votes_against': voting_data['against'],
                'votes_abstain': voting_data['abstain'],
                'votes_absent': voting_data.get('absent', 0),
                'is_lopsided': is_lopsided,
                # Country lists and the full record are stored as JSON text.
                'minority_countries': json.dumps(minority_countries),
                'minority_count': minority_count,
                'majority_side': majority_side,
                'voting_record': json.dumps(voting_data),
                'date': self.extract_date_from_url(url)
            }

            self.save_resolution(resolution_data)
            return True

        except Exception as e:
            print("Error processing {}: {}".format(url, e))
            return False

    def extract_title_from_url(self, url):
        """Fetch the page at *url* and return its best available title.

        Fallback order: <title> text, first <h1> text, the last URL path
        segment with separators spaced out, then the literal
        "Unknown Title" on any error.

        Args:
            url: Resolution page URL.

        Returns:
            Title string; never raises.
        """
        try:
            response = self.session.get(url, timeout=10)
            soup = BeautifulSoup(response.content, 'html.parser')

            # Try to get title from page
            title = soup.find('title')
            if title:
                return title.get_text().strip()

            # Try h1 tags
            h1 = soup.find('h1')
            if h1:
                return h1.get_text().strip()

            # Extract from URL
            return url.split('/')[-1].replace('_', ' ').replace('-', ' ')

        except Exception:
            # Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt and SystemExit.
            return "Unknown Title"

    def extract_date_from_url(self, url):
        """Guess a resolution date from a year embedded in the URL.

        Args:
            url: Resolution page URL.

        Returns:
            "YYYY-01-01" built from the first 19xx/20xx match, or "" when
            the URL contains no such year.
        """
        found = re.search(r'(19|20)\d{2}', url)
        return "{}-01-01".format(found.group()) if found else ""

    def log_crawl_session(self, session_num, success, error_message="", resolutions_found=0):
        """Log crawl session results"""
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()

        try:
            cursor.execute('''
                INSERT INTO crawl_log (session, year, resolutions_found, success, error_message)
                VALUES (?, ?, ?, ?, ?)
            ''', (session_num, session_num + 1945, resolutions_found, success, error_message))

            conn.commit()

        except Exception as e:
            print("Error logging crawl session: {}".format(e))
            conn.rollback()

        conn.close()

    def crawl_all_resolutions(self, start_session=1, end_session=78):
        """Crawl every GA session in [start_session, end_session] and store results.

        For each session: gather candidate URLs from the UN site and the
        Official Document System, de-duplicate, then process each URL with a
        per-request delay. Per-session outcomes (success or failure) are
        written to the crawl_log table and the loop continues on errors.

        Args:
            start_session: First session number to crawl (default 1, i.e. 1946).
            end_session: Last session number to crawl, inclusive (default 78).
        """
        print("Starting comprehensive UN resolution crawl...")
        print("Sessions: {} to {}".format(start_session, end_session))
        print("Voting threshold: {}".format(self.voting_threshold))

        # Running total across all sessions, reported at the end.
        total_resolutions = 0

        for session_num in range(start_session, end_session + 1):
            print("\n" + "="*50)
            # Session N roughly corresponds to calendar year N + 1945.
            print("Crawling Session {} (Year {})".format(session_num, session_num + 1945))
            print("="*50)

            try:
                # Get all resolution URLs for this session
                urls = self.get_resolution_urls_for_session(session_num)

                # Also try UN Official Document System
                odb_urls = self.get_un_odb_data(session_num)
                urls.extend(odb_urls)

                # Remove duplicates
                urls = list(set(urls))

                print("Found {} resolution URLs for session {}".format(len(urls), session_num))

                if not urls:
                    print("No resolutions found for session {}".format(session_num))
                    self.log_crawl_session(session_num, False, "No resolutions found")
                    continue

                # Process resolutions
                session_resolutions = 0
                for i, url in enumerate(urls):
                    print("Processing {}/{}: {}".format(i+1, len(urls), url))

                    # process_resolution is truthy only when data was saved.
                    if self.process_resolution(url, session_num):
                        session_resolutions += 1

                    # Rate limiting
                    time.sleep(self.request_delay)

                total_resolutions += session_resolutions
                print("Session {} complete: {} resolutions processed".format(session_num, session_resolutions))
                self.log_crawl_session(session_num, True, "", session_resolutions)

                # Longer delay between sessions
                time.sleep(self.request_delay * 2)

            except Exception as e:
                # Any per-session failure is logged and the crawl moves on.
                print("Error crawling session {}: {}".format(session_num, e))
                self.log_crawl_session(session_num, False, str(e))
                continue

        print("\n" + "="*50)
        print("CRAWLING COMPLETE")
        print("="*50)
        print("Total resolutions processed: {}".format(total_resolutions))
        print("Database: {}".format(self.db_path))

    def export_to_csv(self, filename="un_resolutions_comprehensive.csv"):
        """Export database to CSV file"""
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()

        cursor.execute('''
            SELECT resolution_number, title, date, session_number, topic_category, votes_for, votes_against,
                   votes_abstain, votes_absent, is_lopsided, minority_countries, minority_count,
                   majority_side, url, voting_record
            FROM resolutions
            ORDER BY session_number, date
        ''')

        with open(filename, 'w', newline='', encoding='utf-8') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(['Resolution Number', 'Title', 'Date', 'Session', 'Topic Category',
                           'Votes For', 'Votes Against', 'Votes Abstain', 'Votes Absent', 'Is Lopsided',
                           'Minority Countries', 'Minority Count', 'Majority Side', 'URL',
                           'Abstain Countries', 'Abstain Countries Count'])

            for row in cursor.fetchall():
                # Parse minority countries from JSON
                minority_countries = json.loads(row[10]) if row[10] and isinstance(row[10], str) else []
                minority_countries_str = '; '.join(minority_countries)

                # Parse abstain countries from voting_record JSON
                abstain_countries = []
                if row[14]:
                    try:
                        voting_record = json.loads(row[14])
                        abstain_countries = voting_record.get('countries', {}).get('abstain', [])
                    except json.JSONDecodeError:
                        pass
                abstain_countries_str = '; '.join(abstain_countries)
                abstain_countries_count = len(abstain_countries)

                writer.writerow([
                    row[0], row[1], row[2], row[3], row[4], row[5],
                    row[6], row[7], row[8], row[9], minority_countries_str, row[11], row[12], row[13],
                    abstain_countries_str, abstain_countries_count
                ])

        conn.close()
        print("Exported lopsided resolutions to {}".format(filename))

def main():
    """Entry point: build the crawler, crawl every session, export the CSV."""
    database_file = "un_resolutions_complete.db"
    # Re-running against an existing database is fine: rows are upserted
    # by resolution number, so this effectively resumes the crawl.
    if os.path.exists(database_file):
        print("Existing database found. Starting comprehensive crawl...")

    crawler = ComprehensiveUNResolutionCrawler(database_file)

    # Sessions 1..78 cover 1946 through the present.
    crawler.crawl_all_resolutions(start_session=1, end_session=78)
    crawler.export_to_csv()

    print("Comprehensive crawling completed!")

# Run the full crawl only when executed as a script (not on import).
if __name__ == "__main__":
    main()