import requests
from bs4 import BeautifulSoup
import pymysql
import datetime
from lxml.cssselect import etree
import re


# MySQL password: 123456 (development credentials)
# weekno = datetime.datetime.now().isocalendar()[1]
# print(weekno)
# weekNo = '国内主要油厂大豆压榨预估调查统计（2019年第'+str(weekno)+'周）'
# print(weekNo)
#
# page = requests.get("https://www.myagric.com/article/p-4275----070301---------1.html")
# demo = page.text
# soup = BeautifulSoup(demo,"html.parser")
# for k in soup.find_all('a'):
#     if (k.string == weekNo):
#         beanHref = k.get('href')
#         print(beanHref)
#
#
# beanHref = "https:" + beanHref
# print(beanHref)
#
# page1 = requests.get(beanHref)
# demo1 = page1.text

# etree1 = etree.HTML(demo1,etree.HTMLParser())
# result = etree1.xpath("/html/body/div[@class='wrap']/div[@class='main']/div[@class='articleBox']/div[@class='content article-content']/text()")  #xpath后面要加/text()，获取内容
# resultStr = str(result)
#正则表示式提取数据
# pattern = "\d+\.\d+"
# reResult = re.findall(pattern=pattern,string=resultStr)
#
# estimatedPressCapacity = reResult[0]
# startUpRate = reResult[1]
# preWeekPressCapacity = reResult[2]
# offSet = reResult[3]
# preWeekStartUpRate = reResult[4]
#
# mysql = pymysql.connect("172.31.162.131","root","123456","future")
# cursor = mysql.cursor()
# sql = 'insert into bean_startup values({year},{weekNo},{estimatedPressCapacity},{startUpRate},{preWeekPressCapacity},{offSet},{preWeekStartUpRate})'.format(
#     year=year,weekNo=weekNo,estimatedPressCapacity=estimatedPressCapacity,startUpRate=startUpRate,preWeekPressCapacity=preWeekPressCapacity,offSet=offSet,preWeekStartUpRate=preWeekStartUpRate
# )
# try:
#     cursor.execute(sql)
#     mysql.commit()
#     print("提交成功")
# except:
#     mysql.rollback()
#
# mysql.close()

# Build yesterday's "M月D日" label and last week's ISO week number.
# BUG FIX: the original computed "day - 1" as a plain integer, which yields
# "0日" (and keeps the current month) on the first day of a month; using
# timedelta handles month/year rollover correctly.
_today = datetime.date.today()
_yesterday = _today - datetime.timedelta(days=1)
month = _yesterday.strftime('%m') + '月'   # zero-padded month, e.g. "03月"
day = str(_yesterday.day) + '日'           # day without zero padding, as before
date = month + day
# Statistics on the site refer to the previous ISO week.
week = _today.isocalendar()[1] - 1

class Future():
    """Scrapes futures-related statistics (stocks, crush rates) from myagric.com.

    Instances are configured by assigning ``url``, ``xpathPattern``,
    ``rePattern`` and ``sql`` after construction; the class attributes
    below only provide defaults.
    """

    matchStr = ""       # link text to match on the index page
    url = ""            # index page URL for the data item
    xpathPattern = ""   # XPath used to pull the article body text
    # Default number extractor: decimals first, then plain integers.
    # Raw string avoids the invalid-escape-sequence warning of "\d" on
    # modern Python.
    regularPattern = r"\d+\.\d+|\d+"
    sql = ""            # INSERT statement built by the caller
    rePattern = ""      # regex used by getsearchData

    def __init__(self):
        pass

    def getHref(self, tagString, url):
        """Return the absolute URL of the article whose link text is *tagString*.

        The site uses protocol-relative links, so "https:" is prepended.
        Raises UnboundLocalError (via geturl) when no link matches; callers
        catch this with a broad except.
        """
        return "https:" + self.geturl(tagString, url)

    def geturl(self, tagString, url):
        """Return the raw href of the <a> on *url* whose text equals *tagString*.

        If several links match, the last one wins (original behavior).
        Raises UnboundLocalError when no link matches.
        """
        page = requests.get(url)
        soup = BeautifulSoup(page.text, "html.parser")
        for anchor in soup.find_all('a'):
            if anchor.string == tagString:
                matched = anchor.get('href')
        return matched

    def getxpathdata(self, url, xpathPattern):
        """Fetch *url* and return str() of the node list selected by *xpathPattern*.

        :param url: article URL obtained via getHref
        :param xpathPattern: XPath expression ending in /text()
        :return: the stringified list of matched text nodes
        """
        page = requests.get(url)
        tree = etree.HTML(page.text, etree.HTMLParser())
        return str(tree.xpath(xpathPattern))

    def getsearchData(self, url, rePattern):
        """Fetch *url* and search its text with *rePattern*.

        :return: a re.Match object, or None when the pattern does not match
        """
        page = requests.get(url)
        return re.search(rePattern, page.text)

    def getRegularData(self, xpathData, regularPattern):
        """Return every substring of *xpathData* matching *regularPattern*.

        :param xpathData: text obtained via getxpathdata
        :param regularPattern: regex for the numbers to extract
        :return: list of matched strings (empty when nothing matches)
        """
        return re.findall(pattern=regularPattern, string=xpathData)

    def saveDataToTxt(self, resultStr, target):
        """Append str(resultStr) to the file *target*.

        Encoding is pinned to UTF-8 so Chinese text is written consistently
        across platforms (the original relied on the locale default, which
        can fail on e.g. Windows cp936).
        """
        with open(target, "a+", encoding="utf-8") as f:
            f.write(str(resultStr))



class MySqlOperation():
    """Thin pymysql wrapper: an autocommitting connection plus query helpers.

    Errors are printed rather than raised, matching this script's
    best-effort style.
    """

    def __init__(self, host, user, passwd, db, port, chartset='utf-8'):
        """Open the connection; exits the process if it cannot connect.

        NOTE(review): ``chartset`` (sic) is accepted but never forwarded to
        pymysql.connect (which expects charset='utf8', not 'utf-8'); it is
        deliberately left unused so existing callers are unaffected —
        confirm the intended charset before wiring it up.
        """
        try:
            self.con = pymysql.connect(host=host, user=user, passwd=passwd, port=port, db=db)
            self.con.autocommit(True)  # every statement commits immediately
        except BaseException as e:
            print(e)
            exit(1)

    def executeSql(self, sql, args=None):
        """Execute a write statement (INSERT/UPDATE/DELETE).

        BUG FIX: *args* is now forwarded to cursor.execute so parameterized
        statements work; the original silently dropped it, forcing callers
        to interpolate values into the SQL string themselves.
        """
        try:
            cursor = self.con.cursor()
            cursor.execute(sql, args)
        except Exception as e:
            print(e)

    def queryOneData(self, sql, args=None):
        """Run *sql* with optional *args* and return the first row (None on error)."""
        try:
            cursor = self.con.cursor()
            cursor.execute(sql, args)
            return cursor.fetchone()
        except Exception as e:
            print(e)

    def queryManyData(self, sql, args=None):
        """Run *sql* with optional *args* and return all rows (None on error)."""
        try:
            cursor = self.con.cursor()
            cursor.execute(sql, args)
            return cursor.fetchall()
        except Exception as e:
            print(e)

    def close(self):
        """Close the underlying connection."""
        self.con.close()


"""
礼拜六执行任务，统计的上礼拜的数据
"""


if __name__ == "__main__":
    year = datetime.datetime.now().isocalendar()[0]
    #如果这周是第一周，获取上年最后一周的数据
    if week == 1:
        year = year - 1
        week = 52



    print("=========  获取大豆库存  =========")  #大豆库存是礼拜五统计的
    storeTagString = '全国主要地区大豆及豆粕库存统计（' + str(year) + '年' + '第' + str(week) + '周' +'）'
    beanStore = Future()
    beanStore.url = "https://www.myagric.com/article/p-4118----07010401---------1.html" # 大豆库存主页面
    try:
        storeHref = beanStore.getHref(storeTagString, beanStore.url)
    except Exception as e:
        print("大豆库存地址错误,请检查网站或者名称是否正确。,原因: {}".format(e))
    beanStore.xpathPattern = "/html/body/div[1]/div[1]/div[1]/div[2]/text()"
    bean_resultStr = beanStore.getxpathdata(url=storeHref, xpathPattern=beanStore.xpathPattern)  # 获取数据
    # resultData = beanStore.getRegularData(bean_resultStr,beanStore.regularPattern)
    bean_store = re.search("大豆库存([+-]?[0-9]+(\.[0-9]+)?)", bean_resultStr)
    print(bean_store)
    bean_store = re.search("\d+\.\d+|\d+",bean_store.group())
    beanStore.sql = 'insert into bean_store values({year},{week},{bean_store})'.format(
     year=year,week=week,bean_store=bean_store.group())
    print(bean_store.group())

    print("=========  获取豆粕库存  =========")  # 豆粕库存是礼拜五统计的
    bean_meal_store = re.search("豆粕库存为([+-]?[0-9]+(\.[0-9]+)?)", bean_resultStr)
    bean_meal_store = re.search("\d+\.\d+|\d+", bean_meal_store.group())
    beanStore.sql = 'insert into bean_meal_store values({year},{week},{bean_meal_store})'.format(
        year=year, week=week, bean_meal_store=bean_store.group())
    print(bean_store.group())

    print("=========  获取大豆开机率  =========")  #礼拜2统计的,预估值。
    startUpTagString = '全国主要油厂大豆压榨预估调查统计（' + str(year) + '年第' + str(week) + '周）'  # 豆粕开机率的匹配名
    beanStartUp = Future()
    beanStartUp.url = "https://www.myagric.com/article/p-4275----07010401---------1.html"  # 豆粕开机率主页面
    try:
        startUpHref = beanStartUp.getHref(startUpTagString, beanStartUp.url)  # 获取数据的href
    except Exception as e:
        print("大豆开机率地址错误,请检查网站或者名称是否正确。原因: {}".format(e))
    beanStartUp.xpathPattern = "/html/body/div[@class='wrap']/div[@class='main']/div[@class='articleBox']/div[@class='content article-content']/text()"
    bean_press_resultStr = beanStartUp.getxpathdata(url=startUpHref,xpathPattern=beanStartUp.xpathPattern)
    resultData = beanStartUp.getRegularData(bean_press_resultStr,beanStartUp.regularPattern)
    bean_press = re.search("实际压榨量为([+-]?[0-9]+(\.[0-9]+)?)", bean_press_resultStr)
    bean_press = re.search("\d+\.\d+|\d+", bean_press.group())
    bean_start_up = resultData[-1]
    print(bean_start_up)

    print("=========  获取油脂库存数据  =========")
    print("=========  获取豆油库存数据  =========")
    bean_oil = Future()
    bean_oil.url = "https://www.myagric.com/article/p-4118----07010501---------1.html"  #豆油库存页面
    # bean_oil_store = re.search("全国重点地区豆油商业库存约([+-]?[0-9]+(\.[0-9]+)?)万吨",)
    bean_oil.rePattern = "全国重点地区豆油商业库存约([+-]?[0-9]+(\.[0-9]+)?)万吨"
    try:
        bean_oil_stroe = bean_oil.getsearchData(bean_oil.url,bean_oil.rePattern)
    except Exception as e:
        print("找不到豆油库存数据,原因: {}".format(e))
    bean_oil_stroe = bean_oil_stroe.group()
    bean_oil_stroe = re.search("([+-]?[0-9]+(\.[0-9]+)?)",bean_oil_stroe).group()
    print(bean_oil_stroe)

    print("=========  获取棕榈油库存数据  =========")
    maripa_oil = Future()
    maripa_oil.url = "https://www.myagric.com/article/p-4118----07010502---------1.html"
    maripa_oil.rePattern = "全国重点地区棕榈油商业库存约([+-]?[0-9]+(\.[0-9]+)?)万吨"
    try:
        maripa_oil_store = maripa_oil.getsearchData(maripa_oil.url,maripa_oil.rePattern)
    except Exception as e:
        print("找不到棕榈油库存数据,原因: {}".format(e))
    maripa_oil_store = maripa_oil_store.group()
    maripa_oil_store = re.search("([+-]?[0-9]+(\.[0-9]+)?)",maripa_oil_store).group()
    print(maripa_oil_store)



    # print("=========  保存数据到Mysql  =========")
    # mysql = MySqlOperation(host="10.10.134.58", user="root", passwd="Yanshi@123", db="future", port=3306)
    # mysql.executeSql(beanStore.sql)
    # mysql.close()

    # print("=========  保存数据到Mysql  =========")
    # mysql = MySqlOperation(host="172.31.162.131",user="root",passwd="123456",db="future",port=3306)  #连接数据库
    # mysql.executeSql(beanStartUp.sql)
    # mysql.executeSql(beanStore.sql)
    # mysql.executeSql(beanShip.sql)
    # mysql.executeSql(rapeseedMeal.sql)
    # mysql.close()  #关闭数据库连接

