# -*- coding: utf-8 -*-

import os, re, time, sys, queue, random
from bs4 import BeautifulSoup
from lxml import etree
from lxml.etree import XPath
import urllib.request
import requests
import html5lib
import html as ht
import demjson
from urllib.parse import quote
from mysql.MysqlClient import MysqlClient
from txt.TXTClient import TXTClient

'''
公告-基类 (Announcement scraper - base class)
'''
class GonggaoBase:

    def __init__(self, fmt):
        """Initialize with a strftime format and capture today's date string.

        :param fmt: strftime format string, e.g. '%Y%m%d'.
        """
        self.fmt = fmt
        # strftime defaults to the current local time, so passing
        # localtime(time()) explicitly was redundant.
        self.dt = time.strftime(fmt)

    def getFile(self, url, headers):
        """Download *url* into the current directory, named after the last
        URL path segment.

        :param url: direct link to the file.
        :param headers: dict of HTTP headers to send.
        :raises requests.HTTPError: when the server answers with an error status.
        """
        file_name = url.split('/')[-1]
        res = requests.get(url, headers=headers, stream=False, timeout=20, verify=True)
        # Fail loudly on HTTP errors instead of silently writing an
        # error page to disk as if it were the requested file.
        res.raise_for_status()
        with open(file_name, 'wb') as file:
            file.write(res.content)

        print("Successful to download" + " " + file_name)

    def downloadFile(self, url, file_name) :
        try :
            res = urllib.request.urlopen(url)
            block_sz = 8192
            with open(file_name, 'wb') as file:#, encoding="utf-8" #binary mode doesn't take an encoding argument
                while True:
                    buffer = res.read(block_sz)
                    if not buffer:
                        break
                    file.write(buffer)
            print("###Sucessful to download" + " " + file_name)
        except Exception as e:
            print(url, file_name, e)

    #读取离线网页
    def readOffHtml(self, file_name):
        try:
            with open(file_name, "rb") as f:
                document = html5lib.parse(f)
        except Exception as e:
            print(e)
        finally:
            return document

    #解析离线网页
    def getOffHtml(self, file_name):
        html=""
        try:
            html = etree.parse(file_name, etree.HTMLParser())
        except Exception as e:
            print(e)
        finally:
            return html

    def request(self, method, url, headers=None, params=None, data=None, json=None):
        """Perform an HTTP request and return the body decoded as UTF-8.

        :param method: 'GET' or 'POST' (case-insensitive); anything else is a no-op.
        :param url: target URL.
        :param headers: optional dict of HTTP headers.
        :param params: optional query-string parameters (GET).
        :param data: optional form body (POST).
        :param json: optional JSON body (POST).
        :return: response text, or '' on any failure.
        """
        # Defined up-front: the finally-return below must never hit an
        # unbound local (the original raised UnboundLocalError whenever the
        # request itself failed).
        html = ''
        try:
            if method.upper() == 'GET':
                res = requests.get(url, headers=headers, params=params, timeout=20, verify=False)
            elif method.upper() == 'POST':
                res = requests.post(url, headers=headers, data=data, json=json, timeout=20, verify=False)
            else:
                return html
            print(method, url, params, data, json)
            # The original sniffed <meta charset> here but then unconditionally
            # overwrote the result with UTF-8, so the sniffing was dead code:
            # all downstream processing expects UTF-8 text.
            res.encoding = 'UTF-8'
            html = res.text
        except requests.HTTPError as err:
            # requests.HTTPError has no ``.code``; print the exception itself.
            print('HTTPerror: %s' % err)
        except urllib.request.URLError as err:
            print('URLerror, reason: %s' % err.reason)
        except Exception as e:
            print('@@@', e)
        finally:
            # Deliberate best-effort contract: always return a string,
            # never propagate a network error to the caller.
            return html

    def requestHtml(self, method, url, headers, **kwargs):
        """Convenience wrapper around :meth:`request` that makes headers required."""
        return self.request(method, url, headers=headers, **kwargs)

    def deal_file_name(self, url, file_name):
        """Sanitize *file_name* and append an extension derived from *url*.

        :param url: source link; its last path segment supplies the extension.
        :param file_name: raw display name (date + title).
        :return: filesystem-safe name ending in the URL's extension or '.pdf'.
        """
        # Guard against absurdly long titles (255 is the usual FS name limit).
        if len(file_name) >= 255:
            file_name = file_name[0:50]
        # Strip path separators, whitespace and characters illegal in filenames.
        file_name = re.sub(r'[/\\.:*?"<>|\t \n]', '', file_name)
        # Take the extension from the URL's LAST path segment only: the old
        # url.split('.')[-1] could pick up dots in the host name and leak '/'
        # into the file name (e.g. 'http://x.com/file' -> '.com/file').
        last_segment = url.rsplit('/', 1)[-1]
        if '.' in last_segment:
            file_name += '.' + last_segment.rsplit('.', 1)[-1]
        else:
            file_name += '.pdf'
        return file_name

    def getContent(self, el_inner, href_xpath, date_xpath, filename_xpath, convertDataList):
        """Extract one announcement entry from *el_inner* and append it to
        *convertDataList* as a DB-ready tuple.

        :param el_inner: element holding one listing entry.
        :param href_xpath: compiled XPath callable yielding the link.
        :param date_xpath: compiled XPath callable yielding the date string.
        :param filename_xpath: compiled XPath callable yielding the title.
        :param convertDataList: output list; each entry is the tuple
            (id, company_name, file_date, file_name, file_path, file_href, file_content).
        A broken entry is logged and skipped (best effort).
        """
        try:
            href = href_xpath(el_inner)[0].strip()
            # Relative links: decode HTML entities, then percent-encode for safety.
            if 'http' not in href:
                href = quote(ht.unescape(href))
            file_href = self.host_url.strip() + href
            file_date = date_xpath(el_inner)[0].strip()
            file_name = file_date + '#' + ht.unescape(filename_xpath(el_inner)[0].strip())
            file_name = self.deal_file_name(href, file_name)
            file_path = os.path.join(os.getcwd(), file_name)
            file_content = None
            # HTML detail pages need per-site handling; binary files (.pdf,
            # .docx, ...) are linked directly and need none (the explicit
            # download used to live here and is disabled).
            if re.search(r'\.s*html*|\.aspx*', href, re.I):
                if self.company_name == '北京城建':
                    # Content is scraped off the page and saved as .txt.
                    file_path = re.sub(r'\..+', '.txt', file_path)
                    file_content = self.detail_page(file_href, file_path)
                elif self.company_name == '福星股份':
                    # The real document URL lives inside the detail page.
                    file_href = self.detail_page(file_href, None)
                    file_name = file_date + '#' + ht.unescape(filename_xpath(el_inner)[0].strip())
                    file_name = self.deal_file_name(file_href, file_name)

            print(self.id, self.company_name, file_date, file_name, file_path, file_href, file_content)
            par = (self.id, self.company_name, file_date, file_name, file_path, file_href, file_content)
            convertDataList.append(par)
        except Exception as e:
            # Best effort: one broken entry must not abort the whole listing,
            # but leave a trace instead of swallowing silently.
            print('getContent skipped entry:', e)

    #Process a detail page: extract its text content and persist it
    def detail_page(self, url, file_path):
        """Fetch *url*, extract the node selected by ``self.content_xpath``
        as plain text, and write it to *file_path*.

        :param url: absolute URL of the detail page.
        :param file_path: destination .txt path (passed through to txt_client).
        :return: extracted text, or None when the page is missing or broken.
        """
        htmlText = self.requestHtml('GET', url, self.headers)
        if htmlText is None or htmlText.find('运行时错误') > 0:
            return None
        html = etree.HTML(htmlText)

        # Defined up-front: the finally-return below must never hit an
        # unbound local (the original raised NameError when extraction failed).
        result = None
        try:
            content_xpath = XPath(self.content_xpath)
            content = etree.tostring(content_xpath(html)[0], pretty_print=True).decode('utf8')
            # Strip markup; BeautifulSoup copes with malformed HTML.
            soup = BeautifulSoup(content, 'html.parser')
            result = soup.get_text()
            # NOTE(review): txt_client is a module-level global created in the
            # __main__ block — this method raises NameError if the module is
            # imported rather than run as a script; confirm intended usage.
            txt_client.write(file_path, 'w', result)

        except Exception as e:
            print(e)
        finally:
            return result

    #分页处理
    def next_page(self, url, page, convertDataList):
        data = self.data
        if self.data != None and self.data != '' :
            if self.data.find('%') > 0 :
                self.data = self.data % page
            data = demjson.decode(self.data)

        if self.html_file_path != None and self.html_file_path != '':
            html = self.getOffHtml(self.html_file_path)
        else:

            htmlText = self.requestHtml(self.method, url, self.headers, data=data, json=self.json)#params=self.params,
            if htmlText.find('html') <= 0 :
                htmlText = '<html><div>' + htmlText + '</div></html>'
            #print(htmlText)
            if htmlText==None or htmlText.find('</a>') <=0 or htmlText.find('运行时错误') > 0:
                return None
            html = etree.HTML(htmlText)

        try:
            outer_xpath = XPath(self.outer_xpath)
            inner_xpath = XPath(self.inner_xpath)

            href_xpath = XPath(self.href_xpath)
            date_xpath = XPath(self.date_xpath)
            filename_xpath = XPath(self.filename_xpath)

            el_outers = outer_xpath(html)
            for el_outer in el_outers:
                el_inners = inner_xpath(el_outer)
                for el_inner in el_inners:
                    if self.other_xpath!=None and self.other_xpath!='' :
                        other_xpath = XPath(self.other_xpath)
                        el_others = other_xpath(el_inner)
                        for el_other in el_others:
                            self.getContent(el_other, href_xpath, date_xpath, filename_xpath, convertDataList)
                    else :
                        self.getContent(el_inner, href_xpath, date_xpath, filename_xpath, convertDataList)

        except Exception as e:
            print(e)
        finally:
            return convertDataList

    def main(self, queryData):
        """Process each crawler-config row: fetch all listing pages, collect
        file rows, then mark the config done and bulk-insert the results.

        :param queryData: iterable of company_spider rows (21 columns each).
        """
        # Remember the starting directory. Downloads happen inside a
        # per-company subfolder, and we must chdir back afterwards — the
        # original never did, so every company nested inside the previous
        # company's folder.
        root_dir = os.getcwd()
        for row in queryData:
            self.id, self.company_name, self.index_url, self.content_url, self.host_url, self.html_file_path, self.method, self.headers, self.cookie, self.params, self.data, self.json, self.outer_xpath, self.inner_xpath, self.other_xpath, self.date_xpath, self.filename_xpath, self.href_xpath, self.content_xpath, self.start_page, self.end_page = row
            # A config without an outer xpath cannot be parsed — skip it.
            if self.outer_xpath == None or self.outer_xpath == '':
                continue
            if self.host_url == None:
                self.host_url = ''

            print(self.company_name, '======================>')
            # Per-company download directory, always anchored at root_dir.
            company_dir = os.path.join(root_dir, self.company_name)
            if not os.path.exists(company_dir):
                os.mkdir(company_dir)
            os.chdir(company_dir)
            self.headers = demjson.decode(self.headers)
            if self.cookie != None and self.cookie != '':
                self.headers['Cookie'] = self.cookie
            convertDataList = []

            # `params` is either a comma-separated list of URL tokens or a
            # 'start-end' numeric range.
            if self.params == None:
                self.params = '1'
            url_params = re.split(',', self.params)
            param_range = re.split('-', self.params)
            if len(param_range) == 2:
                url_params = list(range(int(param_range[0]), int(param_range[1])))

            # One end-page per URL token (comma-separated), or one shared value.
            end_page = re.split(',', self.end_page)
            index = 0
            for param in url_params:
                if self.content_url.find('%') > 0:
                    # Site-specific quirks: page 1 has no suffix on these sites.
                    if self.company_name == '北大资源':
                        url = self.content_url % ('' if param == 1 else '_' + str(param))
                    elif self.company_name == '大名城':
                        url = self.content_url % ('' if param == 1 else '-' + str(param))
                    else:
                        url = self.content_url % param
                else:
                    url = self.content_url

                # Walk every listing page for this URL token.
                for page in range(self.start_page, int(end_page[index])):
                    self.next_page(url, page + 1, convertDataList)
                if len(end_page) > 1:
                    index += 1

            convertRow = len(convertDataList)
            print("转换数：", convertRow)
            if convertRow > 0:
                # Fresh DB connection per company with results.
                sql_client = MysqlClient()
                conn, curs = sql_client.conn_istorage()
                # Mark this crawler config as downloaded.
                sql = "update company_spider set status='%s' where id=%d " % ('1', self.id)
                rest = sql_client.execute_sql(conn, curs, 'error_sql', [sql])
                print('更新成功数：', rest)

                # Bulk-insert the collected file rows.
                insert_sql = """
                insert into company_spider_file(
                    company_id, company_name, file_date, file_name, file_path, file_href, file_content
                    ) VALUES (%s, %s, %s, %s, %s, %s, %s)
                """
                rest = sql_client.insert_many(conn, curs, insert_sql, convertDataList)
                print('插入成功?', rest)
                conn.close()
            # Restore the working directory for the next company.
            os.chdir(root_dir)

    def makeColIndex(self, cols):
        """Map each column name in the comma-separated *cols* string to its
        positional index.

        :param cols: e.g. "id, company_name, `method`" — spaces and SQL
            backticks are stripped first.
        :return: dict of column name -> 0-based index (first occurrence wins
            for duplicate names, matching setdefault semantics).
        """
        names = re.split(',', re.sub('[ `]', '', cols))
        mapping = {}  # renamed: the original shadowed the builtin `dict`
        for index, name in enumerate(names):
            mapping.setdefault(name, index)
        return mapping

if __name__ == '__main__':
    # Record start time for the final runtime report.
    str_time = int(time.time())
    txt_client = TXTClient()
    # Database connection for reading crawler configs.
    sql_client = MysqlClient()
    connect, cursor = sql_client.conn_istorage()

    # Date format used for the scraper's date stamp.
    obj = GonggaoBase('%Y%m%d')

    # status='0' marks configs not yet crawled.
    count_sql = "select count(id) from company_spider where status = '0'"
    cols = "id, company_name, index_url, content_url, host_url, html_file_path, `method`, headers, cookie, params, `data`, json, outer_xpath, inner_xpath, other_xpath, date_xpath, filename_xpath, href_xpath, content_xpath, start_page, end_page"
    query_sql = "select " + cols + " from company_spider where status = '0' limit %d, %d"

    totalCount = sql_client.count_data(connect, cursor, count_sql)[0]
    print("总记录数", totalCount)
    if totalCount == 0:
        exit(1)

    # Page through pending configs in batches of 1000.
    pageSize = 1000
    pages = int((totalCount - 1) / pageSize) + 1
    for pageNum in range(pages):
        query_sql_tmp = query_sql % ((pageNum * pageSize), pageSize)
        queryResult = sql_client.query_data(connect, cursor, query_sql_tmp)
        print("记录数：", len(queryResult))
        # main() collects and persists rows itself and returns nothing, so
        # the old `convertList = obj.main(...)` binding was misleading.
        obj.main(queryResult)

    connect.close()

    end_time = int(time.time())
    print("程序执行时间（秒）：", end_time - str_time)