# -*- coding: utf-8 -*-

import os, re, time
import demjson
import html as ht
from lxml import etree
from lxml.etree import XPath
from urllib.parse import quote
from spider.gonggao.GonggaoBase import GonggaoBase

from mysql.MysqlClient import MysqlClient
from txt.TXTClient import TXTClient

'''
Announcement spider — Dexin Real Estate (德信地产)
'''
class Gonggao(GonggaoBase):
    """Announcement spider for Dexin Real Estate (德信地产).

    Fetches a JSON listing API page by page, converts each row into a
    storage tuple, and (for HTML/ASPX detail links) pulls the detail
    page content via the base class.
    """

    def __init__(self, fmt):
        # Direct base-class call reuses GonggaoBase initialisation
        # (python2-style `super(Gonggao, self)` kept commented out by the
        # original author; behavior is identical here).
        GonggaoBase.__init__(self, fmt)

    # Per-page handler: parse one listing page, append converted rows.
    def next_page(self, url, page, convertDataList):
        """Fetch one listing page of announcements.

        :param url: listing API url for this page
        :param page: page number (unused here; kept for the base-class contract)
        :param convertDataList: output list; one tuple per announcement is
            appended: (id, company_name, file_date, file_name, file_path,
            file_href, file_content)
        :return: None when the API reports failure, otherwise falls through.
        """
        response = self.requestHtml(self.method, url, self.headers, json=self.json)
        # demjson tolerates the slightly non-standard JSON this site returns.
        response = demjson.decode(response)

        msg_tag = self.outer_xpath
        data_tag = self.inner_xpath
        date_tag = self.date_xpath
        name_tag = self.filename_xpath
        href_tag = self.href_xpath

        # BUG FIX: the original `find('成功') <= 0` also treated a message
        # *starting* with '成功' (index 0) as failure.  Use membership instead.
        if '成功' not in response.get(msg_tag):
            return None

        for row in response.get(data_tag):
            # Pre-initialise so the except-handler below never hits a
            # NameError when an early statement raises (original bug).
            href = file_href = file_date = file_name = file_path = None
            file_content = None
            try:
                href = row.get(href_tag).strip()
                if 'http' not in href:
                    # Relative link: decode HTML entities, then percent-encode
                    # so non-ASCII path segments form a valid URL.
                    href = quote(ht.unescape(href))
                file_href = self.host_url.strip() + href
                file_date = row.get(date_tag).strip()
                file_name = file_date + '#' + row.get(name_tag).strip()
                file_name = self.deal_file_name(href, file_name)
                file_path = os.path.join(os.getcwd(), file_name)
                # Matches .html / .htm / .shtml / .aspx style detail pages.
                match = re.findall(r'\.s*html*|\.aspx*', href, re.M | re.I)
                if match:
                    # Detail page: store extracted text as a .txt file path.
                    file_path = re.sub(r'\..+', '.txt', file_path)
                    file_content = self.detail_page(file_href, file_path)
                else:
                    # Non-page attachments: download intentionally disabled.
                    # self.downloadFile(file_href, file_name)
                    pass
                print(self.id, self.company_name, file_date, file_name, file_path, file_href, file_content)
                par = (self.id, self.company_name, file_date, file_name, file_path, file_href, file_content)
                convertDataList.append(par)
            except Exception as e:
                # Best-effort: log the failing row and keep processing the
                # rest of the page rather than aborting the whole crawl.
                print(e, self.id, self.company_name, file_date, file_name, file_path, file_href, file_content)

if __name__ == '__main__':
    # Record the wall-clock start so total runtime can be reported at the end.
    start_ts = int(time.time())

    txt_client = TXTClient()

    # Open the storage database connection.
    sql_client = MysqlClient()
    connect, cursor = sql_client.conn_istorage()

    # Spider instance; dates are formatted as YYYYMMDD.
    obj = Gonggao('%Y%m%d')

    cols = "id, company_name, index_url, content_url, host_url, html_file_path, `method`, headers, cookie, params, `data`, json, outer_xpath, inner_xpath, other_xpath, date_xpath, filename_xpath, href_xpath, content_xpath, start_page, end_page"
    # Only the single active spider config (id = 11) is selected.
    query_sql = "select " + cols + " from company_spider where status = '0' and id = %d" % (11)

    queryResult = sql_client.query_data(connect, cursor, query_sql)
    print(type(queryResult))
    convertList = obj.main(queryResult)

    connect.close()

    end_ts = int(time.time())
    print("程序执行时间（秒）：", end_ts - start_ts)
