from bs4 import BeautifulSoup

import requests
import csv
import bs4
import time
import mysql.connector

from datetime import timedelta, datetime
from selenium import webdriver
from selenium.webdriver.chrome.options import Options


# Headless-Chrome setup consumed by open_chrome() via Selenium.
chrome_path ='/usr/bin/chromedriver'
chrome_options = Options()
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--headless')  # run without a visible browser window
chrome_options.add_argument('--disable-gpu')
# Database connection settings.
# NOTE(review): credentials are hard-coded in source — move them to environment
# variables / a secrets store and rotate the exposed passwords.
config_168 = {'host': '127.0.0.1',  # default 127.0.0.1
          'user': 'root', 'password': 'Tnq39/*riqJcC', 'port': 3306,  # default is 3306
          'database': 'ccii_redesign', 'charset': 'utf8','auth_plugin':'mysql_native_password'  # default is utf8
          }

# Aliyun RDS production settings — appears unused in this file; save() uses `config`.
config1 = {'host': 'rm-2zefd6473rz062234.mysql.rds.aliyuncs.com',  # default 127.0.0.1
          'user': 'prod_ccii', 'password': 'cI1546_wodesecerts', 'port': 3306,  # default is 3306
          'database': 'ccii_prod', 'charset': 'utf8', 'auth_plugin':'mysql_native_password'  # default is utf8
          }

# Active connection settings used by save().
config = {'host': 'rm-2zefd6473rz062234.mysql.rds.aliyuncs.com',  # default 127.0.0.1
          'user': 'prod_python', 'password': '939_58J6kAW)P&^', 'port': 3306,  # default is 3306
          'database': 'ccii_prod', 'charset': 'utf8'  # default is utf8
          }


'''
国内白虾
惠农网价格网址：
https://www.cnhnb.com/hangqing/cd-2001440-2620-0-3059/
无数据

抓取合肥网站数据
http://www.hfzgncp.com.cn/price.php?class_id=104102103&s1=2020-11-11&s2=%E7%99%BD%E5%AF%B9%E8%99%BE#xx

'''
# 获取合肥价格数据

# 爬取资源
def get_contents(rurl):
    """Extract the white-shrimp price row from the Hefei market page HTML.

    Parameters:
        rurl: full HTML source of the price page (despite the name this is
              page markup, not a URL).

    Returns:
        ``[price, date]`` on success — price is the row's average price
        doubled (the site quotes yuan per 500 g, so *2 yields the per-kg
        price) formatted to two decimals; date is the quote-date string.
        An empty list when the page could not be parsed.
    """
    data = []
    try:
        soup = BeautifulSoup(rurl, 'lxml')
        rows = soup.find_all('dd', class_="clearfix")
        # s5 holds the average price, s6 the quote date; the min (s3) and
        # max (s4) columns are intentionally not used.
        avg_price = rows[0].find_all("div", class_="s5")[0].string
        price_date = rows[0].find_all("div", class_="s6")[0].string
        # Site quotes 元/500克 — double to get the per-kg price.
        data.append(format(float(avg_price) * 2, ".2f"))
        data.append(str(price_date))
    except (IndexError, TypeError, ValueError) as e:
        # IndexError: expected row/column missing; TypeError: .string was
        # None; ValueError: price text not numeric. Narrowed from a blanket
        # `except Exception` so programming errors are no longer swallowed.
        print(f'解析数据异常 {e}')

    return data



# 获取最大页数
def get_maxPage(rsData):
    """Return the total number of result pages advertised by the pager.

    Parameters:
        rsData: full HTML source of a price listing page.

    Returns:
        The highest page number found in the ``new_page4`` pager, 1 when the
        pager holds exactly one link, or 0 when no page count could be read.
    """
    pageNum = 0
    try:
        soup = BeautifulSoup(rsData, 'lxml')
        pager = soup.find_all('div', 'new_page4')
        links = pager[0].find_all('a')

        if len(links) > 1:
            # Assumes the last <a> is a "next" control and the one before it
            # carries the max page number — TODO confirm against live markup.
            pageNum = int(links[-2].text)
        elif len(links) == 1:
            # Only one page of data.
            return 1
        else:
            print("获取页码为0")
    except (IndexError, ValueError) as e:
        # Was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to the failures parsing can raise
        # (missing pager div / non-numeric page label) and log the cause.
        print('解析数据异常', e)
    print("一共有{}页数据".format(str(pageNum)))
    return pageNum


# 用浏览器打开网页
# 用浏览器打开网页
def open_chrome(url):
    """Fetch *url* with headless Chrome and return the rendered page source.

    Parameters:
        url: the page address to load.

    Returns:
        The browser's ``page_source`` after the page load completes.

    A fresh browser instance is created per call and always quit afterwards —
    the original never closed it, leaking a chromedriver process per call.
    """
    browser = webdriver.Chrome(executable_path=chrome_path, options=chrome_options)
    try:
        browser.get(url)
        return browser.page_source
    finally:
        browser.quit()  # release the chromedriver process even on failure

def main(req_url):
    """Fetch one price page, parse it, and store the result when present.

    Parameters:
        req_url: full URL of the Hefei price page for one date.
    """
    started = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    print("开始爬取数据" + started)
    print(req_url)

    page_html = open_chrome(req_url)
    price_row = get_contents(page_html)
    print(price_row)

    # price_row is [price, date] on success, [] when parsing found nothing.
    if price_row:
        save(price_row)
    else:
        print("没有数据")

    finished = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    print("爬取数据完成！" + finished)

# 保存到数据库
def save(data):
    """Insert one ``[price, price_date]`` pair into shop_price_daily_info.

    Parameters:
        data: two-element sequence as produced by get_contents(); the values
              are bound to the two ``%s`` placeholders in order.

    Connection failures are logged and the function returns early — the
    original fell through and crashed with AttributeError calling
    ``''.cursor()`` on the unset connection placeholder.
    """
    try:
        cnn = mysql.connector.connect(**config)  # connect with the module-level `config`
    except mysql.connector.Error as e:
        print('数据库链接失败！', str(e))
        return  # bug fix: do not continue without a usable connection
    else:  # runs only when the connection succeeded
        print("sucessfully!")

    cursor = cnn.cursor(buffered=True)
    try:
        # Parameterized insert: only price and date vary; the remaining
        # columns are fixed values for this market/commodity (type 5050).
        stmt = 'insert into shop_price_daily_info(price, price_date, name, province_name, market_name, type, created_date, last_modified_date, version, copy_data) ' \
               'values (%s, %s, "白对虾", "安徽合肥", "合肥周谷堆大兴农产品国际物流园", 5050, NOW(), NOW(), 0, 0)'

        cursor.execute(stmt, data)
        cnn.commit()
        print("插入数据成功！")

    except mysql.connector.Error as e:
        print('插入数据报错！', str(e))
    finally:
        # Always release the cursor and connection, even on insert failure.
        cursor.close()
        cnn.close()

def handle_url():
    """Build and return the crawl URL for today's date.

    Returns:
        ``base_url + YYYY-MM-DD + suffix_url`` for the current local date.

    The original assigned the URL to a local and discarded it, making the
    function a no-op; returning it keeps the signature unchanged while
    making the helper usable by callers.
    """
    return base_url + time.strftime("%Y-%m-%d", time.localtime()) + suffix_url


# URL template pieces for the Hefei Zhougudui market price page:
# full URL = base_url + <YYYY-MM-DD date> + suffix_url.
base_url = "http://www.hfzgncp.com.cn/price.php?class_id=104102103&s1="
# s2 is the URL-encoded commodity name "白对虾" (white shrimp) plus a page anchor.
suffix_url = "&s2=%E7%99%BD%E5%AF%B9%E8%99%BE#xx"

#url = "http://www.hfzgncp.com.cn/price.php?class_id=104102103&s1=2020-11-11&s2=%E7%99%BD%E5%AF%B9%E8%99%BE#xx"

if __name__ == '__main__':
    # Crawl the quote for yesterday's date (local time).
    run_date = datetime.today() + timedelta(days=-1)
    target_url = base_url + run_date.strftime('%Y-%m-%d') + suffix_url
    main(target_url)









