import pandas as pd
import requests
from bs4 import BeautifulSoup
import os
import time
import urllib.parse

class TCMSPScraper:
    """Scrape herb ingredient lists from the TCMSP website.

    Reads Chinese herb names from the ``Name_CN`` column of an input
    Excel file, looks each herb up on tcmsp-e.com, scrapes its
    ingredient table, and writes one Excel file per herb plus a summary
    workbook into the ``herb_ingredients`` directory.
    """

    # Seconds to wait for any single HTTP request. Without a timeout,
    # requests.get() can block forever on an unresponsive server.
    REQUEST_TIMEOUT = 30
    # Polite delay (seconds) between herbs so we don't hammer the server.
    REQUEST_DELAY = 2

    def __init__(self, input_file):
        """
        Initialize the scraper with the input Excel file

        :param input_file: Path to the input Excel file (expected to
            contain a ``Name_CN`` column with Chinese herb names)
        """
        self.df = pd.read_excel(input_file)

        # One Session reuses TCP connections across the many sequential
        # requests to the same host (faster and friendlier than fresh
        # connections per call).
        self.session = requests.Session()

        # Create output directory if it doesn't exist
        self.output_dir = 'herb_ingredients'
        os.makedirs(self.output_dir, exist_ok=True)

    def sanitize_filename(self, filename):
        """
        Create a safe filename by removing or replacing invalid characters

        :param filename: Original filename
        :return: Sanitized filename; falls back to ``'herb'`` when every
            character is stripped, so we never produce an empty name
        """
        safe = "".join(c for c in filename if c.isalnum() or c in (' ', '.', '_')).rstrip()
        # Guard: a name made entirely of punctuation would otherwise
        # yield files like "_ingredients.xlsx".
        return safe or 'herb'

    def search_herb(self, herb_name):
        """
        Search for the herb on TCMSP website

        :param herb_name: Chinese name of the herb
        :return: URL of the herb page or None if not found
        """
        base_url = 'https://www.tcmsp-e.com/browse.php'

        # requests URL-encodes the parameters (including the Chinese
        # herb name) when they are passed via ``params``.
        search_params = {
            'qc': 'herbs',
            'q': herb_name
        }

        try:
            # Send search request, bounded by a timeout so a dead server
            # cannot stall the whole run.
            response = self.session.get(
                base_url, params=search_params, timeout=self.REQUEST_TIMEOUT
            )

            # Check if request was successful
            if response.status_code != 200:
                print(f"Failed to search for {herb_name}. Status code: {response.status_code}")
                return None

            # Parse the search results
            soup = BeautifulSoup(response.text, 'html.parser')

            # Find the first matching herb link
            herb_link = soup.find('a', string=lambda text: text and herb_name in text)

            if herb_link and 'href' in herb_link.attrs:
                # Construct full URL from the site root and the relative href
                herb_url = f"https://www.tcmsp-e.com/{herb_link['href']}"
                return herb_url

            print(f"No exact match found for {herb_name}")
            return None

        except requests.RequestException as e:
            print(f"Error searching for {herb_name}: {e}")
            return None

    def extract_ingredients(self, herb_url):
        """
        Extract ingredients from the herb's details page

        :param herb_url: URL of the herb's details page
        :return: List of ingredients (empty list on any failure)
        """
        try:
            # Send request to herb page (bounded, see REQUEST_TIMEOUT)
            response = self.session.get(herb_url, timeout=self.REQUEST_TIMEOUT)

            # Check if request was successful
            if response.status_code != 200:
                print(f"Failed to fetch herb details. Status code: {response.status_code}")
                return []

            # Parse the page
            soup = BeautifulSoup(response.text, 'html.parser')

            # Find ingredients table (this might need adjustment based on actual website structure)
            ingredients_table = soup.find('table', class_='ingredients-table')

            if not ingredients_table:
                print("Ingredients table not found")
                return []

            # Extract ingredient names from the second column of each
            # data row (first row is the header).
            ingredients = []
            for row in ingredients_table.find_all('tr')[1:]:  # Skip header row
                cols = row.find_all('td')
                if len(cols) > 1:
                    ingredient_name = cols[1].get_text(strip=True)
                    ingredients.append(ingredient_name)

            return ingredients

        except requests.RequestException as e:
            print(f"Error extracting ingredients: {e}")
            return []

    def process_herbs(self):
        """
        Process each herb in the DataFrame: search, scrape, and save one
        Excel file per herb, then write a summary workbook.
        """
        results = []

        for index, row in self.df.iterrows():
            herb_name = row['Name_CN']
            print(f"Processing {herb_name}...")

            # Search for herb
            herb_url = self.search_herb(herb_name)

            if not herb_url:
                print(f"Skipping {herb_name} - No URL found")
                continue

            # Extract ingredients
            ingredients = self.extract_ingredients(herb_url)

            if not ingredients:
                print(f"No ingredients found for {herb_name}")
                continue

            # Create DataFrame for ingredients
            ingredients_df = pd.DataFrame({
                'Ingredient': ingredients
            })

            # Sanitize filename
            safe_filename = self.sanitize_filename(herb_name)
            output_path = os.path.join(self.output_dir, f"{safe_filename}_ingredients.xlsx")

            # Save to Excel
            ingredients_df.to_excel(output_path, index=False)
            print(f"Saved ingredients for {herb_name} to {output_path}")

            # Store result for potential summary
            results.append({
                'Herb': herb_name,
                'Ingredients_Count': len(ingredients),
                'Output_File': output_path
            })

            # Be nice to the server
            time.sleep(self.REQUEST_DELAY)

        # Create a summary file
        summary_df = pd.DataFrame(results)
        summary_df.to_excel(os.path.join(self.output_dir, 'herbs_ingredients_summary.xlsx'), index=False)
        print("Processing complete. Check the output directory.")

# Script entry point: build a scraper over the input workbook and run it.
if __name__ == "__main__":
    input_workbook = '中文药材.xlsx'
    TCMSPScraper(input_workbook).process_herbs()