import pandas as pd
import numpy as np
import requests
from bs4 import BeautifulSoup
import time
import re
import urllib.parse
import os
import random
import traceback
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def create_session_with_retry():
    """Build a requests.Session that transparently retries transient failures.

    Retries up to 5 times with exponential backoff (factor 1) on HTTP
    429/500/502/503/504 for both GET and POST, over http and https.

    Returns:
        requests.Session: session with the retry adapter mounted.
    """
    retry_policy = Retry(
        total=5,
        backoff_factor=1,
        status_forcelist=[429, 500, 502, 503, 504],
        allowed_methods=["GET", "POST"]
    )

    http_session = requests.Session()
    # Same retry behavior for plain and TLS endpoints.
    for scheme in ("http://", "https://"):
        http_session.mount(scheme, HTTPAdapter(max_retries=retry_policy))

    return http_session

def get_browser_headers():
    """Return realistic request headers impersonating mobile Safari on iOS.

    The mobile user agent makes m.chemicalbook.com serve its lightweight
    mobile markup, which the parsers in this module expect.

    Returns:
        dict: header-name -> header-value pairs for session.get().
    """
    headers = {}
    headers['User-Agent'] = 'Mozilla/5.0 (iPhone; CPU iPhone OS 16_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1'
    headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
    headers['Accept-Language'] = 'zh-CN,zh;q=0.9'
    headers['Connection'] = 'keep-alive'
    return headers

def extract_chemical_info(html_content):
    """Parse a ChemicalBook mobile search-result page into a property dict.

    Args:
        html_content: Raw HTML text of the search-results page.

    Returns:
        dict with keys "分子式" (molecular formula, the "MF" field),
        "CAS号" (CAS number), "活性", "药理作用" (left empty here and
        filled later from the detail page), and "CB号" (ChemicalBook
        product id, e.g. "CB1234567"). Fields not found stay "".
    """
    properties = {
        "分子式": "",
        "CAS号": "",
        "活性": "",
        "药理作用": "",
        "CB号": ""
    }

    soup = BeautifulSoup(html_content, 'html.parser')

    # All result fields live inside the boxList container.
    box_list = soup.find('div', id='boxList')
    if not box_list:
        print("boxList div not found")
        return properties

    for item in box_list.find_all('li'):
        item_text = item.get_text(strip=True)

        # CB product id, taken from the detail-page link when present.
        # (Last matching <li> wins, same as the original scan order.)
        cb_link = item.select_one('a[href*="ProductChemicalPropertiesCB"]')
        if cb_link:
            cb_match = re.search(r'ProductChemicalPropertiesCB(\d+)', cb_link['href'])
            if cb_match:
                properties["CB号"] = f"CB{cb_match.group(1)}"

        # MF and CAS share one markup shape; the duplicated extraction
        # logic is factored into _labeled_value. Only overwrite when a
        # non-empty value was actually found.
        if 'MF' in item_text:
            value = _labeled_value(item, 'MF')
            if value:
                properties["分子式"] = value
        if 'CAS' in item_text:
            value = _labeled_value(item, 'CAS')
            if value:
                properties["CAS号"] = value

    return properties


def _labeled_value(item, label):
    """Return the value following a ``<span class="W-tit">label</span>`` marker.

    Tries the sibling-span CSS selector first, then falls back to a raw
    regex over the item HTML for markup variations the selector misses.
    Returns '' when neither approach finds a value.
    """
    value_span = item.select_one('span.W-tit + span')
    if value_span:
        return value_span.get_text(strip=True)
    match = re.search(label + r'</span><span>([^<]+)', str(item))
    if match:
        return match.group(1).strip()
    return ''

def get_additional_chemical_details(session, cb_number):
    """Fetch 活性 (biological activity) and 药理作用 (uses) from a compound's
    detail page on m.chemicalbook.com.

    Args:
        session: requests.Session used for the HTTP call.
        cb_number: Numeric part of the CB id as a string (no "CB" prefix).

    Returns:
        dict with keys "活性" and "药理作用"; values stay "" when the page
        cannot be fetched or the sections are absent.
    """
    additional_properties = {
        "活性": "",
        "药理作用": ""
    }

    if not cb_number:
        return additional_properties

    details_url = f"https://m.chemicalbook.com/ProductChemicalPropertiesCB{cb_number}.htm"

    try:
        # Random delay so request timing looks human.
        time.sleep(random.uniform(2, 5))

        response = session.get(details_url, headers=get_browser_headers(), timeout=30)

        if response.status_code != 200:
            print(f"Failed to fetch details for {cb_number}: HTTP {response.status_code}")
            return additional_properties

        soup = BeautifulSoup(response.text, 'html.parser')

        # NOTE(review): string= only matches divs whose sole child is the
        # text; section headers with nested markup will not be found —
        # confirm against the live page structure.
        activity_header = soup.find('div', class_='gsbt', string=re.compile('生物活性'))
        if activity_header:
            additional_properties["活性"] = _text_after(activity_header)

        uses_header = soup.find('div', class_='gsbt', string=re.compile('用途'))
        if uses_header:
            additional_properties["药理作用"] = _text_after(uses_header)

        return additional_properties

    except Exception as e:
        print(f"Error fetching details for {cb_number}: {e}")
        return additional_properties


def _text_after(header):
    """Return the stripped text of the first non-blank node after *header*.

    The previous ``header.next_sibling.strip()`` almost always hit the
    whitespace text node between elements (yielding "") and raised
    AttributeError on Tag siblings (swallowed by the caller's broad
    except); this skips blank nodes and handles both node kinds.
    """
    for node in header.next_siblings:
        get_text = getattr(node, 'get_text', None)
        text = get_text(strip=True) if callable(get_text) else str(node).strip()
        if text:
            return text
    return ''

def search_chemicalbook(session, name, name_type='中文名'):
    """Run a ChemicalBook mobile search for *name* and parse the result page.

    Args:
        session: requests.Session used for the HTTP call.
        name: Compound name to search for; may be None/NaN.
        name_type: '中文名' or '英文名' — used only in log messages.

    Returns:
        dict: property dict from extract_chemical_info(); all-empty
        values when the name is missing, the request fails, or an
        exception occurs.
    """
    blank = {
        "分子式": "",
        "CAS号": "",
        "活性": "",
        "药理作用": "",
        "CB号": ""
    }

    if not name or pd.isna(name):
        return blank

    # URL-encode the (possibly non-ASCII) name for the query string.
    search_url = f"https://m.chemicalbook.com/Search.aspx?keyword={urllib.parse.quote(name)}"

    try:
        # Pause a random few seconds so traffic looks human.
        time.sleep(random.uniform(2, 5))

        response = session.get(search_url, headers=get_browser_headers(), timeout=30)

        if response.status_code == 200:
            return extract_chemical_info(response.text)

        print(f"Failed to search for '{name}' ({name_type}): HTTP {response.status_code}")
    except Exception as e:
        print(f"Error searching for '{name}' ({name_type}): {e}")

    return blank

def process_excel_file(input_file, output_file):
    """Look up every compound in *input_file* on ChemicalBook and write the
    enriched table to *output_file*.

    For each row the 中文名 (Chinese name) is searched first, falling back
    to 英文名 (English name); rows with neither are skipped.

    Args:
        input_file: Path to the source Excel file (中文名/英文名 columns).
        output_file: Path where the result Excel file is written.
    """
    df = pd.read_excel(input_file)

    # Prepend a throwaway row ('随便') — presumably a warm-up/sentinel
    # request; add_website_column drops the first row again later.
    sentinel = pd.DataFrame({'中文名': ['随便'], **{col: [None] for col in df.columns if col != '中文名'}})
    df = pd.concat([sentinel, df]).reset_index(drop=True)

    # Normalize spaced hyphens ("a - b" -> "a-b") in Chinese names.
    df['中文名'] = df['中文名'].str.replace(r'\s*-\s*', '-', regex=True)

    result_columns = [
        "中文名", "英文名", "分子式", "CAS号", "SMILES", "二级碎片", "活性", "毒性", "药理作用", "参考文献", "CB号"
    ]

    # One session (with retries) shared by every request.
    session = create_session_with_retry()

    # Collect plain dicts and build the DataFrame once at the end;
    # pd.concat inside the loop is quadratic in the number of rows.
    collected = []
    for index, row in df.iterrows():
        chinese_name = row.get("中文名")
        english_name = row.get("英文名")

        # Prefer the Chinese name, fall back to the English one.
        if chinese_name and not pd.isna(chinese_name):
            search_name, name_type = chinese_name, "中文名"
        elif english_name and not pd.isna(english_name):
            search_name, name_type = english_name, "英文名"
        else:
            print(f"Skipping row {index+1}: No name found")
            continue

        print(f"Processing {index+1}/{len(df)}: {search_name} ({name_type})")

        properties = search_chemicalbook(session, search_name, name_type)

        # The detail page holds activity/usage text; fetch it when a CB id was found.
        if properties["CB号"]:
            cb_number = properties["CB号"].replace("CB", "")
            properties.update(get_additional_chemical_details(session, cb_number))

        collected.append({
            "中文名": chinese_name,
            "英文名": english_name,
            "分子式": properties["分子式"],
            "CAS号": properties["CAS号"],
            "SMILES": "",
            "二级碎片": "",
            "活性": properties["活性"],
            "毒性": "",
            "药理作用": properties["药理作用"],
            "参考文献": "",
            "CB号": properties["CB号"]
        })

    results = pd.DataFrame(collected, columns=result_columns)
    results.to_excel(output_file, index=False)
    print(f"Results saved to {output_file}")

def main():
    """Entry point: locate the input workbook and run the enrichment pass."""
    input_file = "API/蔓荆子0320.xlsx"
    output_file = "API/蔓荆子0320_updated.xlsx"

    # Log where we are running from, since the paths above are relative.
    cwd = os.getcwd()
    print(f"Current working directory: {cwd}")

    os.makedirs("API", exist_ok=True)
    print(f"Made sure 'API' directory exists")

    if not os.path.exists(input_file):
        print(f"Input file '{input_file}' not found.")
        return

    print(f"Found input file: {input_file}")
    process_excel_file(input_file, output_file)

# Run the full scraping pipeline when this file is executed as a script.
if __name__ == "__main__":
    main()


def add_website_column(input_file, output_file):
    """
    Add a new column '网站' to the Excel file with ChemicalBook desktop URLs
    built from the 'CB号' column.

    The first row of the workbook is dropped — presumably the throwaway
    sentinel row that process_excel_file prepends (TODO confirm).

    Parameters:
    input_file (str): Path to the input Excel file (must contain a 'CB号' column)
    output_file (str): Path to save the modified Excel file
    """
    # Read the Excel file and drop the first (sentinel) row.
    # .copy() avoids SettingWithCopyWarning on the column assignment below.
    df = pd.read_excel(input_file)
    df = df.iloc[1:].copy()

    # Build the URL per row. Rows with a missing CB号 previously produced
    # NaN (or raised on non-string values); they now get "" instead.
    df['网站'] = df['CB号'].map(
        lambda cb: f'https://www.chemicalbook.com/ProductChemicalProperties{cb}.htm'
        if pd.notna(cb) else ''
    )

    # Save the modified DataFrame to a new Excel file
    df.to_excel(output_file, index=False)

    print(f"Modified file saved to {output_file}")

# Example usage — guarded so that merely importing this module no longer
# rewrites the workbook as a side effect; behavior when run as a script
# is unchanged.
if __name__ == "__main__":
    input_file = 'API/蔓荆子0320_updated.xlsx'
    output_file = 'API/蔓荆子0320_updated.xlsx'
    add_website_column(input_file, output_file)