import requests,datetime,db,sys
from bs4 import BeautifulSoup
"""
功能描述：爬取贵州省公共资源交易中心新闻网站-政府采购数据
channelId：jygkcgxm-交易公告，jygkzsjg-交易结果公示，jygkcgzs-流标公示，jygkzbgg-资格结果公示，jygggzsx-项目澄清，
"""

class tradeNotice(object):
    """Crawler for government-procurement notices on the Guizhou Province
    public resource trading center site (www.gzsggzyjyzx.cn).

    channelId values (the keys of ``info_type``):
        jygkcgxm - trade announcements, jygkzsjg - trade result publicity,
        jygkcgzs - failed-bid publicity, jygkzbgg - qualification-review
        result publicity, jygggzsx - project clarifications.
    """

    def __init__(self):
        # List-page URL template, filled with (channelId, 1-based page number).
        self.pageUrl = 'http://www.gzsggzyjyzx.cn/%s/index_%d.jhtml'
        # channelId -> display label. The Chinese labels are stored verbatim
        # in the database, so they must stay untranslated.
        self.info_type = {'jygkcgxm':'交易公告', 'jygkzsjg': '交易结果公示', 'jygkcgzs':'流标公示', 'jygkzbgg':'资审结果公示', 'jygggzsx':'项目澄清'}

    def getHTMLText(self, url):
        """Fetch *url* and return its HTML text, or '' on any request failure.

        A timeout is set so a stalled server cannot hang the crawl, and the
        HTTP status is checked so error pages are not parsed as content.
        """
        try:
            req = requests.get(url=url, timeout=30)
            req.raise_for_status()
            return req.text
        except requests.RequestException:
            # Narrowed from a bare `except:` — only network/HTTP errors are
            # swallowed; programming errors now surface normally.
            return ""

    def getPage(self, url):
        """Return ``[page_count, record_count]`` parsed from a list page.

        The pager text (first <a> inside ul.pages-list) looks like
        "1/<pages>页 ... 共<records>条" — presumably; confirmed only by the
        slicing below. Raises IndexError if the page has no pager element.
        """
        html = self.getHTMLText(url)
        bf = BeautifulSoup(html, 'html.parser')
        pagesList = bf.find_all('ul', class_='pages-list')
        pageLi = pagesList[0].li.a.string
        # Slice between '/'..'页' for the page count and '共'..'条' for the
        # total record count.
        return [int(pageLi[pageLi.find('/') + 1:pageLi.find('页')]),
                int(pageLi[pageLi.find('共') + 1:pageLi.find('条')])]

    def getDownUrls(self, url):
        """Return the list of detail-page hrefs found in ul#listbox of *url*."""
        html = self.getHTMLText(url)
        bf = BeautifulSoup(html, 'html.parser')
        listBox = bf.find_all('ul', id='listbox')
        # Every <a> under the list box is a detail-page link.
        return [a['href'] for a in listBox[0].find_all('a')]

    def getContent(self, url):
        """Scrape one detail page.

        Returns a 4-tuple ``(title, content_html, issue_time, source_name)``
        matching the first four columns of tb_trade_info.
        """
        html = self.getHTMLText(url)
        bf = BeautifulSoup(html, 'html.parser')

        # The site's markup is broken (unclosed div soup); strip the trailing
        # close-tag run and re-close div.clear, then re-parse so that
        # div.detail_box captures exactly the article body.
        htmlContent = str(bf)
        replaceHtml = htmlContent.replace('</div></div></div></div></body></html>', '').replace('<div class="clear">', '<div class="clear"></div>')
        bfs = BeautifulSoup(replaceHtml, 'html.parser')
        content = bfs.find_all('div', class_='detail_box')[0]

        # Title from the article header.
        title = bf.find_all('div', class_='article_head')[0].h2.string

        # Subtitle spans: [0] = publish time, [1] = source; strip the labels.
        source = bf.find_all('div', class_='article_subtitle')[0].find_all('span')
        issue_time = str(source[0].contents[0]).replace('发布时间：', '')
        sourceName = str(source[1].contents[0]).replace('来源：', '')
        return (title, str(content), issue_time, sourceName)

    def executeSql(self, sql, param):
        """Execute *sql* with *param* on ``self.connection`` and commit.

        NOTE(review): ``self.connection`` is never assigned in ``__init__`` —
        callers must set it before use (the __main__ flow below uses its own
        connection instead). The connection is closed in ``finally``, so this
        helper is single-use by design — confirm before reusing it in a loop.
        """
        try:
            with self.connection.cursor() as cursor:
                cursor.execute(sql, param)
            self.connection.commit()
        finally:
            self.connection.close()
if __name__ == '__main__':
    tn = tradeNotice()
    connection = db.getMysql()
    redis = db.getRedis()
    # Column order must match the tuple built below:
    # (title, content, issue_time, source_name) from getContent()
    # + (info_type, create_time, original_link, module_class).
    sql = 'INSERT INTO tb_trade_info (title, content, issue_time, source_name, info_type, create_time, original_link, module_class) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)'
    try:
        for infoType in tn.info_type.keys():
            # Page 1 of each channel reports the total page/record counts.
            pages, total = tn.getPage(tn.pageUrl % (infoType, 1))
            currentNum = 0
            print('开始爬取：' + tn.info_type[infoType] + '，共' + str(total) + '条...')
            for currentPage in range(1, pages + 1):
                url = tn.pageUrl % (infoType, currentPage)  # list page for this channel
                for u in tn.getDownUrls(url):
                    # Redis keeps every crawled URL as a dedup key; skip
                    # anything we have already stored. (Was `exit`, which
                    # shadowed the builtin.)
                    if redis.get(u):
                        continue
                    result = tn.getContent(u)  # (title, content, issue_time, source)
                    extra = (tn.info_type[infoType],
                             datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                             u, 'B')
                    with connection.cursor() as cursor:
                        cursor.execute(sql, result + extra)
                    connection.commit()
                    # Mark the URL as crawled only after a successful insert.
                    redis.set(u, u)
                    currentNum += 1
                    # Guard total == 0 to avoid ZeroDivisionError on empty channels.
                    percent = currentNum / total * 100 if total else 100.0
                    sys.stdout.write(tn.info_type[infoType] + '，共' + str(total) + '条，正下载第' + str(
                        currentNum) + '条，已完成%.2f%%' % percent + '\r')
                    sys.stdout.flush()
            print('信息类型：' + infoType + '下载完毕')
    except Exception as e:
        # Top-level boundary: report and fall through to cleanup.
        print(e)
    finally:
        connection.close()