import importlib
import os
import subprocess
import sys
import time

import pandas as pd
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait



def check_py_module(package):
    """Ensure *package* is importable, offering to pip-install it when missing.

    Args:
        package: Name of the module to import-check (e.g. ``'selenium'``).

    If the import fails, the user is prompted; on 'y' the package is
    installed with the pip of the *current* interpreter, on 'n' the
    program exits (the original only printed the exit message).
    """
    try:
        # importlib.import_module is the safe, idiomatic replacement for
        # exec("import ..."), which would execute arbitrary code if the
        # package name were ever attacker-controlled.
        importlib.import_module(package)
    except ModuleNotFoundError:
        inquiry = ''
        while inquiry not in ('y', 'n'):
            inquiry = input('This scripts requires {0}. Do you want to install {0} [y/n]'.format(package))
        if inquiry == 'y':
            print("Execute commands: pip install {0}".format(package))
            # sys.executable -m pip guarantees we install into the same
            # interpreter that is running this script (os.system("pip ...")
            # may hit a different Python on PATH).
            subprocess.run([sys.executable, "-m", "pip", "install", package], check=False)
        else:
            print("{0} is missing, so the program exits!".format(package))
            sys.exit(1)  # fix: the message promised an exit but none happened
            
def pre_checkall():
    """Verify every third-party module this script depends on is importable."""
    for module_name in ('selenium', 'pandas'):
        check_py_module(module_name)

    
def scrape_table_data(driver):
    """Extract table data from the current page.

    Args:
        driver: An active Selenium WebDriver positioned on a results page.

    Returns:
        A list of rows; each row is a list of cell strings, with an Excel
        HYPERLINK formula appended when the first cell links to a bulletin.
    """
    data = []
    try:
        table = WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.CLASS_NAME, "table_text"))
        )
        rows = table.find_elements(By.TAG_NAME, "tr")

        for row in rows[1:]:  # Skip the header row
            cells = row.find_elements(By.TAG_NAME, "td")
            row_data = []
            for cell in cells:
                # If the cell wraps its text in a link, prefer the link's
                # title attribute (full, untruncated text).
                res_a = cell.find_elements(By.TAG_NAME, "a")
                if res_a:
                    row_data.append(res_a[0].get_attribute("title"))
                else:
                    row_data.append(cell.text.strip())
            # Build an Excel HYPERLINK formula from the first column's link.
            if cells:
                # Fix: find_element raises NoSuchElementException when the
                # cell has no <a>; find_elements lets us handle that case.
                links = cells[0].find_elements(By.TAG_NAME, "a")
                # Fix: get_attribute can return None; normalize to "".
                hyperlink = (links[0].get_attribute("href") or "") if links else ""
                marker = "urlOpen('"
                formatted_link = ""
                start = hyperlink.find(marker)
                if start != -1:
                    uuid_start = start + len(marker)
                    # Fix: search for the closing "')" AFTER uuid_start;
                    # the original searched from index 0 and could match
                    # an earlier occurrence, corrupting the UUID.
                    uuid_end = hyperlink.find("')", uuid_start)
                    if uuid_end != -1:
                        uuid = hyperlink[uuid_start:uuid_end]
                        formatted_link = '=HYPERLINK("https://ctbpsp.com/#/bulletinDetail?uuid=' + uuid + '&inpvalue=&dataSource=0&tenderAgency=","点击查看公告")'
                row_data.append(formatted_link)
            data.append(row_data)
    except TimeoutException:
        print("Table not found on this page.")
    return data

def navigate_and_scrape(base_url):
    """Navigate through result pages starting at *base_url* and scrape each.

    Args:
        base_url: First search-results page to load.

    Returns:
        All rows gathered by scrape_table_data across every page.
    """
    # Init Chrome driver. Replace with e.g. webdriver.Firefox() if needed.
    driver = webdriver.Chrome()
    all_data = []
    try:
        driver.get(base_url)
        while True:
            # Scrape the current page
            all_data.extend(scrape_table_data(driver))

            # Check for and click the next-page button.
            try:
                next_button = WebDriverWait(driver, 10).until(
                    EC.element_to_be_clickable((By.LINK_TEXT, "下一页"))  # Adjust the text if necessary
                )
                next_button.click()
                time.sleep(2)  # Allow time for the page to load
            except TimeoutException:
                print("No more pages to navigate.")
                break
    finally:
        # Fix: always close the browser, even if scraping raises —
        # the original leaked a Chrome process on any unexpected error.
        driver.quit()
    return all_data

def save_to_csv(data, filename):
    """Write the scraped rows to *filename* as headerless CSV.

    utf-8-sig adds a BOM so Excel opens the Chinese text correctly.
    """
    pd.DataFrame(data).to_csv(
        filename,
        index=False,
        header=False,
        encoding='utf-8-sig',
    )

def save_to_excel(data, filename):
    """Write the scraped rows to an Excel workbook with fixed Chinese headers."""
    columns = ["招标公告名称", "所属行业", "所属地区", "来源渠道", "公告发布时间", "距离开标时间", "链接"]
    frame = pd.DataFrame(data, columns=columns)
    frame.to_excel(filename, index=False)


if __name__ == "__main__":

    # Make sure selenium/pandas are importable before doing any work.
    pre_checkall()

    '''
    (1) industryName options
        房屋建筑: G01
        市政: G02		
        公路: G03      
        铁路: G04    
        民航: G05      
        水运: G06      
        水利水电: G07
        能源电力: G08
        广电通信: G09  
        化学工业: G10  
        石油石化: G11  
        园林绿化: G12  
        生物医药: G13  
        港口航道: G14  
        纺织轻工: G15  
        矿产冶金: G16  
        航空航天: G17  
        生态环保: G18  
        地球科学: G19  
        信息电子: G20  
        科教文卫: G21   
        商业服务: G22  
        农林牧渔: G23  
        保险金融: G24  
        机械设备: G25  
        其他: G99

    (2)categoryId 
        招标公告: 88
        中标结果公示: 90
        更正公告公示: 90
        资格预审公告: 92
        中标候选人公示: 91
    '''
    # Output workbook location.
    excel_file_path = "D:\\scraped_data.xlsx"
    # Industries to query; default covers 广电通信, 信息电子, 商业服务, 保险金融.
    industryNames = ['G09', 'G20', 'G22', 'G24']
    categoryId = '88'            # Default 招标公告
    startcheckDate = '2025-01-01'  # Date format 2025-01-01
    endcheckDate = '2025-01-02'    # Date format 2025-02-01
    SearchKeyWord = ''

    excel_data = []
    # One search pass per industry code; an empty list falls back to a
    # single pass with industryName left blank (same URL as before).
    for industry in (industryNames or ['']):
        url = (
            "https://bulletin.cebpubservice.com/xxfbcmses/search/bulletin.html"
            f"?searchDate=2025-1-1&dates=300&word={SearchKeyWord}"
            f"&categoryId={categoryId}&industryName={industry}"
            f"&startcheckDate={startcheckDate}&endcheckDate={endcheckDate}"
            "&area=&status=&publishMedia=&sourceInfo=&showStatus=1&page=1"
        )
        excel_data.extend(navigate_and_scrape(url))

    if excel_data:
        save_to_excel(excel_data, excel_file_path)
        print("Data saved to scraped_data.xlsx")
    else:
        print("No data scraped.")