import time
import random
import schedule
import requests
import send_email
import pandas as pd
from io import StringIO
from bs4 import BeautifulSoup
from datetime import datetime

def request_url(queryword="碳盘查", url="https://s.zhaobiao.cn/s", timeout=30):
    """POST a keyword search to zhaobiao.cn and return the results table.

    Args:
        queryword: search keyword sent in the form body (default "碳盘查").
        url: search endpoint to POST to.
        timeout: seconds before the HTTP request is aborted. Added so a
            stalled connection cannot hang the daily scheduled run forever.

    Returns:
        The first ``<table>`` Tag of the result page on HTTP 200 (may be
        ``None`` if the page contains no table), otherwise ``None``.
    """
    # Fixed form fields the site's search endpoint expects; only
    # "queryword" varies per call.
    data = {
        "attachment": "1",
        "field": "all",
        "searchModel": "1",
        "leftday": "1",
        "searchtype": "zb",
        "currentpage": "1",
        "isLogin": "false",
        "totalPage": "50",
        "maxPages": "50",
        "relTotalPages": "230",
        "radiobutton": "radiobutton",
        "queryword": queryword,
    }
    headers = {
        "User-Agent": "MyApp/1.0",
        "content-type": "application/x-www-form-urlencoded",
        "Referer": "https://s.zhaobiao.cn/s"
    }
    try:
        response = requests.post(url, data=data, headers=headers, timeout=timeout)
    except requests.RequestException as exc:
        # Network failure must not crash the scheduler loop; report and
        # signal "no table" to the caller.
        print("request failed:", exc)
        return None
    print("request response status code is:", response.status_code)
    if response.status_code == 200:
        soup = BeautifulSoup(response.text, 'html.parser')
        return soup.find('table')
    if response.status_code == 403:
        print("403 错误，可能的原因：")
        print(response.text)
    # Any non-200 status yields None (previously three inconsistent returns).
    return None
    
def get_zhaobiao_content(url, timeout=30):
    """Fetch one tender-notice page and return its main content element.

    Args:
        url: detail-page URL to POST to.
        timeout: seconds before the HTTP request is aborted. Added so a
            stalled connection cannot hang the scraper.

    Returns:
        The first ``<div class="content">`` Tag on HTTP 200 (may be
        ``None`` if the page has no such div), otherwise ``None``.
    """
    data = {
        "m": "1"
    }
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36",
        "content-type": "application/x-www-form-urlencoded",
        "Referer": "https://s.zhaobiao.cn/"
    }
    try:
        response = requests.post(url, data=data, headers=headers, timeout=timeout)
    except requests.RequestException as exc:
        # Don't let a single bad detail page take down the whole run.
        print("request failed:", exc)
        return None
    print("request response status code is:", response.status_code)
    print(response.text)
    if response.status_code == 200:
        soup = BeautifulSoup(response.text, 'html.parser')
        return soup.find('div', class_='content')
    if response.status_code == 403:
        print("403 错误，可能的原因：")
        print(response.text)
    return None

def get_zhaobiao_data():
    """Scrape search results for each keyword and export them to Excel.

    For every keyword the search-result ``<table>`` is flattened into a
    DataFrame (cell text, plus the href of any ``<a>`` inside a cell
    appended at the end of the row) and written to its own worksheet of
    ``zhaobiaowang.xlsx``.

    Bug fix: previously a single failed keyword (``request_url`` returning
    ``None``) made the function ``return`` immediately, discarding all
    results already collected and writing no Excel file at all. A failed
    keyword is now skipped and the rest of the run proceeds.
    """
    querywords = ["碳盘查", "碳系统", "绿色供应链", "碳咨询", "碳足迹", "碳达峰", "绿色工厂", "绿色园区", "配额", "碳交易", "CEA", "碳排放权"]
    # (keyword, DataFrame) pairs — kept together so worksheet names stay
    # aligned with their data even when some keywords are skipped.
    results = []
    for queryword in querywords:
        table = request_url(queryword)
        if table is None:
            print("对象是 NoneType")
            continue  # skip this keyword instead of aborting the whole run
        # Flatten the HTML table: one list per <tr>, cell text first,
        # then any link hrefs found inside the row's cells.
        rows_data = []
        for row in table.find_all('tr'):
            cells = row.find_all('td')
            row_data = [cell.get_text().strip() for cell in cells]
            for cell in cells:
                link = cell.find('a')
                if link:
                    row_data.append(link.get('href'))
            rows_data.append(row_data)
        results.append((queryword, pd.DataFrame(rows_data)))

        # Polite random delay (2-3 s) before hitting the site again.
        time.sleep(random.uniform(2, 3))

    if not results:
        # ExcelWriter raises if asked to save a workbook with no sheets.
        print("no data collected; skipping Excel export")
        return

    # openpyxl engine so each keyword gets its own worksheet.
    with pd.ExcelWriter('zhaobiaowang.xlsx', engine='openpyxl') as writer:
        for keyword, df in results:
            df.to_excel(writer, sheet_name=keyword, index=False)

    # send_email.send_email()

def get_datas():
    """Run one full scrape-and-notify cycle (invoked by the scheduler).

    Logs the invocation time, scrapes the data, then triggers the
    email-notification helpers from the project's send_email module.
    """
    print("get_datas called!")
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    print("current_time is", timestamp)
    get_zhaobiao_data()
    send_email.send_email()
    send_email.pri()

def main():
    """Schedule the daily scrape at 17:00 and run the scheduler forever."""
    # get_datas()
    # Fire get_datas once per day at 17:00 local time.
    schedule.every().day.at("17:00").do(get_datas)

    # Poll the scheduler every 5 seconds, indefinitely.
    while True:
        schedule.run_pending()
        time.sleep(5)
   
# Script entry point: start the scheduler loop when run directly.
if __name__ == "__main__":
    main()