from bs4 import BeautifulSoup

import requests
import csv
import bs4
import time
from datetime import datetime
import mysql.connector
from decimal import Decimal

import json
import os
import sys
import io
import xlrd
from xlrd import xldate_as_tuple

from selenium import webdriver
from selenium.webdriver.chrome.options import Options

'''

抓取欧盟价格数据，先获取excel再读取excel
欧盟委员会猪胴体价格网址：
https://ec.europa.eu/info/food-farming-fisheries/farming/facts-and-figures/markets/overviews/market-observatories/meat/pigmeat-statistics_en

'''

# Path to the chromedriver binary (Linux deployment).
chrome_path ='/usr/bin/chromedriver'
# chrome_path = 'C:\Program Files\chrome\chromedriver.exe'
# Headless Chrome options so Selenium can run on a server with no display.
chrome_options = Options()
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')

# MySQL connection settings.
# NOTE(review): credentials are hard-coded in plain text; they should be moved
# to environment variables or a secrets store.
# Local/test database — unused by this script.
config_168 = {'host': '127.0.0.1',  # default 127.0.0.1
          'user': 'root', 'password': 'Tnq39/*riqJcC', 'port': 3306,  # default is 3306
          'database': 'ccii_redesign', 'charset': 'utf8','auth_plugin':'mysql_native_password'  # default is utf8
          }

# Production database, admin account — unused by this script.
config1 = {'host': 'rm-2zefd6473rz062234.mysql.rds.aliyuncs.com',  # default 127.0.0.1
          'user': 'prod_ccii', 'password': 'cI1546_wodesecerts', 'port': 3306,  # default is 3306
          'database': 'ccii_prod', 'charset': 'utf8', 'auth_plugin':'mysql_native_password'  # default is utf8
          }

# Production database, python account — the config actually used below.
config = {'host': 'rm-2zefd6473rz062234.mysql.rds.aliyuncs.com',  # default 127.0.0.1
          'user': 'prod_python', 'password': '939_58J6kAW)P&^', 'port': 3306,  # default is 3306
          'database': 'ccii_prod', 'charset': 'utf8'  # default is utf8
          }


# Directory where downloaded Excel files are stored.
path = "/opt/excel_price/"


# 爬取资源
def get_contents(rs_page):

    fileName = "pig-weekly-prices-eu_en.xlsx"
    # path = "F:/excel_price/"
    data = []

    try:

        if not os.path.exists(path):
            os.makedirs(path)

        soup = BeautifulSoup(rs_page, 'lxml')
        excel_dates = soup.find_all("div", class_="field file__date paragraph--xsmall paragraph--strong")
        # print(excel_dates)
        lxlsDate = excel_dates[0].string
        print("lxlsDate=" + lxlsDate)
        pathFullName = path + lxlsDate + "-" + fileName

        if os.path.exists(pathFullName):
            print("未更新数据无需爬取")
            return

        trs = soup.find_all('a', title = "pig-weekly-prices-eu_en.xlsx")
        # print(trs)
        hrefUrl = trs[0].get('href')
        print("hrefUrl=" + hrefUrl)
        result = requests.get(hrefUrl, timeout=30)
        result.raise_for_status()
        excelFile = result.content

        if result.status_code == 200:
            time.sleep(0.5)
            with open(pathFullName, 'wb') as file:
                file.write(excelFile)
                # time.sleep(1)
                file.close()
        else:
            print("请求返回状态码: " + str(result.status_code))

        # 读取excel中的数据,并简单处理
        data = readExcel(pathFullName)

    except Exception as e:
        print(f'解析数据异常 {e}')

    return data


# 读取excel中的文件
def readExcel(filePath):
    # 打开文件
    data = xlrd.open_workbook(filePath)

    # 查看工作表
    data.sheet_names()
    print("sheets：" + str(data.sheet_names()))

    # 通过文件名获得工作表,获取工作表1
    table = data.sheet_by_name('Class S')

    # 打印data.sheet_names()可发现，返回的值为一个列表，通过对列表索引操作获得工作表1
    # table = data.sheet_by_index(0)

    # 获取行数和列数
    # 行数：table.nrows
    # 列数：table.ncols
    print("总行数：" + str(table.nrows))
    print("总列数：" + str(table.ncols))
    data = []

    # print(table.cell(363, 0).value)
    # 注意N周后要修改
    for rowNum in range(342, 5000):
        item_de = []
        item_es = []
        date_value = table.cell(rowNum, 0).value
        de_value = table.cell(rowNum, 9).value
        es_value = table.cell(rowNum, 12).value
        if not date_value == '' :
            # ctype = table.cell(rowNum, 0).ctype  # 表格的数据类型
            date = datetime(*xldate_as_tuple(date_value, 0))
            cell = date.strftime('%Y-%m-%d')
            # 日期 0
            item_de.append(cell)
            item_es.append(cell)
            # 价格 1
            item_de.append(format(de_value, '.2f'))
            item_es.append(format(es_value, '.2f'))
            # 国家英文 2
            item_de.append("DE")
            item_es.append("ES")
            # 国家中文 3
            item_de.append("德国")
            item_es.append("西班牙")
            # 币种 4
            item_de.append("EUR")
            item_es.append("EUR")
            # 单位 5
            item_de.append("欧元/100kg")
            item_es.append("欧元/100kg")

            data.append(item_de)
            data.append(item_es)
        else:
            break

    print(data)

    return data


# 用浏览器打开网页
# Render a page with headless Chrome and return its HTML source.
def open_chrome(url):
    """Load *url* in headless Chrome and return the rendered page source.

    The browser instance is intentionally left open (matching the original
    behavior); the process ends when the script exits.
    """
    driver = webdriver.Chrome(executable_path=chrome_path, options=chrome_options)
    driver.get(url)
    page_html = driver.page_source
    return page_html

def work(reqUrl):
    """Run the full pipeline: render the page, download/parse the Excel
    data, enrich it with FX rates and persist new rows to the database."""
    print(" 开始爬取数据" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))

    print(reqUrl)
    page_html = open_chrome(reqUrl)
    rows = get_contents(page_html)

    # data = readExcel(filePath)

    price_data = []
    if rows:
        price_data = handle_price(rows)
    else:
        print("没有数据不更新")

    # Persist to the database only when enrichment produced rows.
    if price_data:
        save(price_data)
    print("爬取数据完成！" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))



# 处理价格数据
# Enrich price rows with CNY FOB/CIF prices computed from the FX rate.
def handle_price(data):
    """Append FOB price, CIF price and the FX rate to each price row.

    For every 6-element row [date, price, country_en, country_cn, currency,
    unit] the <currency>->CNY rate for that date is looked up. Rows with no
    usable rate are dropped: keeping them (as the original did) produced
    6-element rows that would break save()'s 9-placeholder executemany.

    Args:
        data: rows produced by readExcel().

    Returns:
        List of fully-enriched 9-element rows
        [..., price_fob, price_cif, rate].
    """
    enriched = []
    for item in data:
        rate = get_rate(item[0], item[4])
        if not rate:  # covers both 0 and None
            continue
        # Source price is EUR per 100 kg -> CNY per kg.
        fob_price = format(Decimal(item[1]) * Decimal(rate) / 100, ".2f")
        # CIF (CNY/kg) = FOB (CNY/kg)
        #   + sea freight (2000 * 7.1 / 25000 CNY/kg)
        #   + marine insurance (FOB * 0.05%)
        cif_price = format(float(fob_price) + float(2000*7.1/25000) + float(fob_price)*0.0005, ".2f")
        item.append(fob_price)
        item.append(cif_price)
        item.append(rate)
        print(item)
        enriched.append(item)

    return enriched

# 查询数据库得到汇率
# Query the database for a CNY exchange rate.
def get_rate(priceDate, currencyType):
    """Return the <currencyType>->CNY rate effective on *priceDate*.

    Args:
        priceDate: date string 'YYYY-MM-DD'.
        currencyType: ISO currency code, e.g. 'EUR'.

    Returns:
        The rate_value from alarm_rate_cny, or 0 when the connection fails
        or no matching row exists.
    """
    rate = 0
    try:
        cnn = mysql.connector.connect(**config)
    except mysql.connector.Error as e:
        print('数据库链接失败！', str(e))
        # The original fell through and crashed on ''.cursor(); bail out.
        return rate
    print("sucessfully!")

    cursor = cnn.cursor(buffered=True)
    try:
        # Parameterized query: the original concatenated the values into the
        # SQL string, which is injection-prone (and omitted a space before
        # the AND keyword).
        stmt = ("select rate_value from alarm_rate_cny "
                "where worm_date like %s and rate_en_name = %s")
        cursor.execute(stmt, ("%" + priceDate + "%", currencyType + "CNY"))
        row = cursor.fetchone()
        # fetchone() returns None when no row matches; the original's [0]
        # raised an uncaught TypeError in that case.
        if row is not None:
            rate = row[0]
        print(rate)
        print("查询数据成功！")

    except mysql.connector.Error as e:
        print('查询数据报错！', str(e))
    finally:  # always release the cursor and the connection
        cursor.close()
        cnn.close()

    return rate



# 保存到数据库
# Bulk-insert the enriched price rows into the database.
def save(data):
    """Insert rows into alarm_intl_pig_price in one executemany call.

    Args:
        data: 9-element rows [date, price, country_en, country_cn,
            currency, unit, price_fob, price_cif, rate] as built by
            handle_price().
    """
    try:
        cnn = mysql.connector.connect(**config)
    except mysql.connector.Error as e:
        print('数据库链接失败！', str(e))
        # The original fell through and crashed on ''.cursor(); bail out.
        return
    print("sucessfully!")

    cursor = cnn.cursor(buffered=True)
    try:
        # Fully parameterized insert: the product name is bound like every
        # other value instead of being concatenated into the SQL string.
        stmt = ("insert into alarm_intl_pig_price"
                "(name, price_date, price, country_en, country_cn, currency_type,"
                " unit, price_fob, price_cif, rate, create_date, del_flag)"
                " values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, NOW(), 0)")
        rows = [('猪胴体',) + tuple(item) for item in data]
        cursor.executemany(stmt, rows)
        cnn.commit()
        print("插入数据成功！")

    except mysql.connector.Error as e:
        print('插入数据报错！', str(e))
    finally:  # always release the cursor and the connection
        cursor.close()
        cnn.close()


# Source page listing the weekly EU pig carcass price workbook.
url = "https://ec.europa.eu/info/food-farming-fisheries/farming/facts-and-figures/markets/overviews/market-observatories/meat/pigmeat-statistics_en"

# Local fixture path for testing readExcel() directly on Windows.
filePath = "F:\excel_price\\28 October 2020-pig-weekly-prices-eu_en.xlsx"

if __name__ == '__main__':
    work(url)
    # get_rate("2020-11-16", "EUR")







