import os
import sys
import json
import urllib
import urllib.request

import shutil
import uuid

import re
import hashlib

import time
import io
import gzip

from bs4 import BeautifulSoup

# Absolute directory and file name of this script; `dirname` anchors the
# on-disk 'store' and 'temp' folders used by the spider below.
dirname, filename = os.path.split(os.path.abspath(__file__)) 
# os.path.realpath(__file__)  (alternative that would also resolve symlinks)

def _HttpRequest(url, headers=None, bodyData=None, method='GET'):
    """Perform an HTTP request and return {'code', 'headers', 'data'}.

    On success 'data' is the decoded response text (raw bytes when no
    charset is advertised); on any failure a dict with code 500 and the
    error text is returned instead of raising.
    """
    # Copy the headers so neither the caller's dict nor a shared default
    # is ever mutated (the original mutable-default argument was modified
    # in place on every call).
    headers = dict(headers) if headers else {}
    headers.setdefault('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36')
    headers.setdefault('Accept-Language', 'zh-CN,zh;q=0.9')
    headers.setdefault('Connection', 'keep-alive')

    req = urllib.request.Request(url, headers=headers, data=bodyData, method=method)

    try:
        response = urllib.request.urlopen(req)
        code = response.getcode()   # status code
        url = response.geturl()     # final URL (after redirects)
        info = response.info()      # response headers
        data = response.read()      # raw body bytes

        # Transparently decompress gzip bodies (header name lookup is
        # case-insensitive on the Message object; normalize the value too).
        if info.get('Content-Encoding', '').lower() == 'gzip':
            data = gzip.GzipFile(fileobj=io.BytesIO(data)).read()

        # Decode with the advertised charset.  gb2312 is widened to its
        # superset gb18030 to tolerate out-of-range characters.  The old
        # code decoded gzip bodies as ASCII, which broke on any non-ASCII
        # (e.g. Chinese) content.
        encoding = response.headers.get_content_charset() or ''
        if encoding.lower() == 'gb2312':
            encoding = 'gb18030'

        content = data.decode(encoding) if encoding else data

        return {'code': code, 'headers': info, 'data': content}
    except Exception as e:
        # TODO: log detailed error info and handle specific error types separately.
        print(e)
        return {'code': 500, 'headers': {}, 'data': 'Error {0}'.format(str(e))}

def _requestHtmlParse(url, parseResult):
    """Fetch *url* and extract text via the CSS selectors in *parseResult*.

    parseResult maps output keys to CSS selector strings; for each key the
    text of every matching element is concatenated.  Keys whose selector is
    empty or matches nothing are omitted from the result.
    """
    result = {}

    resp = _HttpRequest(url)
    if resp['code'] != 200:
        return result

    sp = BeautifulSoup(resp['data'], 'html.parser')

    # Iterating items() directly replaces the redundant `key in parseResult`
    # re-lookup of the very dict being iterated.
    for key, parseRule in parseResult.items():
        if parseRule == '':
            continue

        try:
            # join() avoids quadratic += concatenation over many matches.
            text = ''.join(item.get_text() for item in sp.select(parseRule))
            if text != '':
                result[key] = text
        except Exception:
            info = sys.exc_info()
            print(url, parseRule, info[1], info[2].tb_lineno)

    return result

# Translation table mapping characters that are illegal (or awkward) in
# file names to safe substitutes; one C-level pass replaces the previous
# chain of twelve .replace() calls.
_FILENAME_SAFE_TABLE = str.maketrans({
    '\r': ' ', '\n': ' ', '\t': ' ',
    '\\': '、', '/': '~', ':': '：', '*': '#', '?': '？',
    '"': '“', '<': '《', '>': '》', '|': '+',
})

def _saveData(spiderData, saveFolder):
    """Write *spiderData* as pretty-printed JSON into *saveFolder*.

    The record's 'primaryKey' (sanitized for the file system) becomes the
    file name; the target folder is created on demand.
    """
    primaryKey = spiderData['primaryKey'].translate(_FILENAME_SAFE_TABLE)

    os.makedirs(saveFolder, exist_ok=True)

    filepath = os.path.join(saveFolder, primaryKey + '.json')

    # Context manager guarantees the file is closed even if the write fails.
    with open(filepath, mode="w", encoding="utf-8") as fd:
        fd.write(json.dumps(spiderData, ensure_ascii=False, indent=3))

def loadsJsonp(_jsonp):
    """Strip a JSONP wrapper and return the payload wrapped in a JSON array
    string, e.g. 'cb({"a": 1});' -> '[{"a": 1}]'.

    Raises ValueError when no JSON object can be found in the input
    (previously this surfaced as a ValueError wrapping an AttributeError
    from calling .group() on None).
    """
    m = re.match(r".*?({.*}).*", _jsonp, re.S)
    if m is None:
        raise ValueError('no JSON object found in JSONP payload')
    return '[' + m.group(1) + ']'

def requestAndSave(results, parseResult, savePath, sleepTime=0, nodeUrlHandle=''):
    """Crawl every entry in *results*, parse each page with *parseResult*,
    and persist entries that yielded a 'title' or 'content' to *savePath*.

    sleepTime is a per-entry delay in milliseconds applied before each
    iteration; nodeUrlHandle is an optional URL template whose '@[URI]'
    placeholder receives the entry's own URL.
    """
    for entry in results:
        # Throttle between requests when configured.
        if sleepTime > 0:
            time.sleep(sleepTime / 1000)

        if 'spiderUrl' not in entry:
            continue

        try:
            targetUrl = entry['spiderUrl']

            # Route the request through the template when one is supplied.
            if '@[URI]' in nodeUrlHandle:
                targetUrl = nodeUrlHandle.replace('@[URI]', targetUrl)

            parsed = _requestHtmlParse(targetUrl, parseResult)
            entry['spiderData'] = parsed
            if 'title' in parsed or 'content' in parsed:
                _saveData(entry, savePath)
        except Exception:
            info = sys.exc_info()
            print(info[1], info[2].tb_lineno)

def parseJsonNode(data, nodeRule):
    """Recursively extract spider targets from *data* following *nodeRule*.

    nodeRule['data-type'] selects the strategy:
      * 'Object' + childNodes: descend into data[key] with the child rule.
      * 'Object' without childNodes: data[key] is the spider URL; the
        primary key comes from the configured 'primary-key' field or falls
        back to the MD5 of the URL.
      * 'Array': apply childNodes to every element of data[key] (or of
        data itself when no key is given).
      * 'String': data[key] is the spider URL.

    Returns a list of {'primaryKey', 'spiderUrl', 'baseData'} dicts.
    """
    dataType = nodeRule['data-type']
    key = nodeRule.get('key')
    childNodes = nodeRule.get('childNodes')

    if dataType == 'Object':
        if childNodes:
            # Tolerate sparse records: nothing to descend into.
            if key not in data:
                return []
            return parseJsonNode(data[key], childNodes)

        if key not in data:
            return []

        primaryKey = nodeRule.get('primary-key')
        if primaryKey and primaryKey in data:
            pk = data[primaryKey]
        else:
            pk = hashlib.md5(data[key].encode(encoding='UTF-8')).hexdigest()

        return [{'primaryKey': pk, 'spiderUrl': data[key], 'baseData': data}]

    if dataType == 'Array':
        result = []

        if childNodes:
            listdata = data[key] if key else data
            for item in listdata:
                result += parseJsonNode(item, childNodes)

        return result

    if dataType == 'String':
        # Guard the missing-key case (the Object branch already did; the
        # old String branch raised KeyError instead).
        if key not in data:
            return []

        spiderUrl = data[key]
        pk = hashlib.md5(spiderUrl.encode(encoding='UTF-8')).hexdigest()

        return [{'primaryKey': pk, 'spiderUrl': spiderUrl, 'baseData': data}]

    # Unknown data-type: return an empty list instead of falling off the
    # end with None, which crashed callers that concatenate results.
    return []

def parseJson(data, parseRules):
    """Decode the JSON text *data* (empty input becomes an empty dict) and
    delegate extraction of spider targets to parseJsonNode."""
    parsed = json.loads(data) if len(data) else {}
    return parseJsonNode(parsed, parseRules)

def parseHtmlElement(sp, rule, attr):
    """Select elements from the soup *sp* with CSS selector *rule* and
    return one value per match: its text when attr == 'text', otherwise
    the named attribute (empty string when the attribute is absent)."""
    if attr == 'text':
        return [node.get_text() for node in sp.select(rule)]

    return [node[attr] if node.has_attr(attr) else '' for node in sp.select(rule)]

def _UrlCompletion(baseUrl, url):
    """Resolve *url* against *baseUrl* per RFC 3986.

    Pseudo-links (empty string, '#...', 'javascript:', 'data:image') are
    returned untouched.  Everything else - protocol-relative ('//'),
    root-relative ('/'), document-relative and already-absolute
    references - is resolved with urllib.parse.urljoin, which replaces
    the old hand-rolled "path + '/../' + url" joining (that left literal
    '/../' segments in stored URLs and mangled schemes like 'mailto:').
    """
    if not url or url.startswith(('#', 'javascript:', 'data:image')):
        return url

    # urljoin already returns absolute http(s) URLs unchanged, so the old
    # explicit http/https guards are unnecessary.
    return urllib.parse.urljoin(baseUrl, url)

def parseHtml(html, baseUrl, parseRules):
    """Parse a listing page and return spider target dicts.

    parseRules supplies 'url' and 'title' entries, each holding a CSS
    'rule' and an 'attr' ('text' or an attribute name).  URLs are resolved
    against *baseUrl*; each result's primaryKey is the MD5 of its URL.
    Returns [] when parseRules is empty/None.
    """
    results = []
    if not parseRules:
        return results

    sp = BeautifulSoup(html, 'html.parser')

    urlRule = parseRules['url']
    titleRule = parseRules['title']
    # primaryKey = parseRules['primary-key']

    spiderUrls = parseHtmlElement(sp, urlRule['rule'], urlRule['attr'])
    spiderTitles = parseHtmlElement(sp, titleRule['rule'], titleRule['attr'])

    # zip() pairs URL/title by position and, unlike the old index-based
    # loop, cannot raise IndexError when the two selectors match a
    # different number of elements.
    for pageUrl, pageTitle in zip(spiderUrls, spiderTitles):
        pageUrl = _UrlCompletion(baseUrl, pageUrl)

        results.append({
            'baseData': {'url': pageUrl, 'title': pageTitle},
            'spiderUrl': pageUrl,
            'primaryKey': hashlib.md5(pageUrl.encode(encoding='UTF-8')).hexdigest(),
        })

    return results


from concurrent.futures import ThreadPoolExecutor

class ActionThreadSingle(object):
    """Process-wide singleton tracking running spider jobs.

    Maps a job key (MD5 of the save folder, see start()) to its Future so
    the same spider configuration is never executed twice concurrently.
    """
    _instance = None

    # Shared pool: at most 4 spider jobs run at the same time.
    threadPool = ThreadPoolExecutor(max_workers=4)
    # key -> Future (or '' placeholder while the job is being submitted)
    actionThreadMap = {}

    def put(self, key, value):
        """Register (or replace) the job stored under *key*."""
        self.actionThreadMap[key] = value

    def remove(self, key):
        """Forget the job stored under *key* (KeyError if absent)."""
        del self.actionThreadMap[key]

    def containsKey(self, key):
        """Return True when a job is registered under *key*."""
        return key in self.actionThreadMap

    def __new__(cls, *args, **kw):
        # Do not forward constructor arguments to object.__new__ - doing
        # so raises TypeError on Python 3 if any are ever passed.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        pass

def startSpider(config, keyId):
    """Execute one crawl job described by *config*.

    Iterates the configured base URLs and page range, fetches each listing
    page, extracts spider targets according to 'content-type' and
    'parse-rules', then crawls and saves the targets via requestAndSave.
    On exit *keyId* is always removed from the ActionThreadSingle registry
    so the same configuration can be started again.
    """
    try:
        # Minimal configuration sanity check.
        if 'baseUrl' not in config or 'content-type' not in config or 'enabled' not in config:
            return

        enabled = config['enabled']

        if enabled == 1:
            # Paging settings; empty strings fall back to the defaults below.
            pageNumStart = config['pageNumStart'] if 'pageNumStart' in config else ''
            pageNumEnd = config['pageNumEnd'] if 'pageNumEnd' in config else ''
            pageSize = config['pageSize'] if 'pageSize' in config else ''

            pageNumStart = pageNumStart or '1'
            pageNumEnd = pageNumEnd or '1'
            pageSize = pageSize or '10'

            # 'baseUrl' and 'save-folder' are parallel newline-separated
            # lists: entry i of one corresponds to entry i of the other.
            baseUrlStr = config['baseUrl']
            saveFolderStr = config['save-folder']

            baseUrlArr = baseUrlStr.split('\n')
            saveFolderArr = saveFolderStr.split('\n')

            for i in range(len(baseUrlArr)):
                baseUrl = baseUrlArr[i]
                saveFolder = saveFolderArr[i]

                # Page range is inclusive on both ends.
                for pageNum in range(int(pageNumStart), int(pageNumEnd) + 1):
                    try:
                        # Substitute paging placeholders into the URL template.
                        tempBaseUrl = baseUrl
                        tempBaseUrl = tempBaseUrl.replace('@[pageSize]', pageSize)
                        tempBaseUrl = tempBaseUrl.replace('@[pageNum]', str(pageNum))
                        tempBaseUrl = tempBaseUrl.replace('@[(pageNum-1)*pageSize]', str((pageNum - 1) * int(pageSize)))
                        print(tempBaseUrl)

                        # Optional extra request headers, one "Name: value"
                        # pair per line.  NOTE(review): this inner loop reuses
                        # the outer loop variable `i`; harmless in Python (the
                        # outer `for` re-binds it next iteration) but fragile.
                        headers = {}
                        requestHeaderStr = config['request-header'] if 'request-header' in config else ''
                        if requestHeaderStr != '':
                            requestHeaderArr = requestHeaderStr.split('\n')

                            for i in range(len(requestHeaderArr)):
                                requestHeader = requestHeaderArr[i]
                                if requestHeader != '':
                                    headerArr = requestHeader.split(': ')
                                    if len(headerArr) >= 2:
                                        headers[headerArr[0]] = headerArr[1]

                        requestMethod = config['request-method'] if 'request-method' in config else 'GET'

                        resp = _HttpRequest(tempBaseUrl, headers=headers, method=requestMethod)
                        # resp = {'code': 200, 'headers': {}, 'data': ''}

                        if resp['code'] == 200:
                            contentType = config['content-type']
                            parseRules = config['parse-rules']

                            # Extract the list of spider targets from the body.
                            results = []
                            if contentType == 'json':
                                results = parseJson(resp['data'], parseRules)
                            if contentType == 'jsonp':
                                jsonData = loadsJsonp(resp['data'])
                                results = parseJson(jsonData, parseRules)
                            elif contentType == 'html':
                                results = parseHtml(resp['data'], baseUrl, parseRules)

                            # Crawl each target and persist matching pages
                            # under <module dir>/store/<saveFolder>.
                            savePath = os.path.join(dirname, 'store', saveFolder)
                            parseResult = config['parse-result']
                            nodeUrlHandle = config['node-url-handle'] if 'node-url-handle' in config else ''
                            sleepTime = int(config['sleepTime']) if 'sleepTime' in config and config['sleepTime'] != '' else 0
                            requestAndSave(results, parseResult, savePath, sleepTime, nodeUrlHandle)

                    except Exception as e:
                        # Per-page failures are logged; the next page continues.
                        info = sys.exc_info()
                        print(info[1], info[2].tb_lineno)

    except Exception as e:
        info = sys.exc_info()
        print(info[1], info[2].tb_lineno)
    finally:
        # Always free the job slot so the spider can be restarted later.
        ats = ActionThreadSingle()
        ats.remove(keyId)

def start(config):
    """Submit the spider described by *config* to the shared thread pool,
    unless it is disabled or the same job is already running."""
    if 'enabled' not in config:
        return

    if config['enabled'] != 1:
        return

    saveFolder = config['save-folder']
    keyId = hashlib.md5(saveFolder.encode(encoding='UTF-8')).hexdigest()

    ats = ActionThreadSingle()
    if ats.containsKey(keyId):
        print(saveFolder + '：爬虫执行中')
        return

    # Reserve the key first so a concurrent submit of the same job is
    # rejected, then store the real Future.
    ats.put(keyId, '')
    future = ats.threadPool.submit(startSpider, config, keyId)
    ats.put(keyId, future)

def getSpiderDownloadData(saveFolder):
    """Zip the spider's store folder and return the archive as bytes.

    A uniquely named temporary zip is created under <module dir>/temp and
    always deleted afterwards, even when reading it back fails.
    """
    dataFolderPath = os.path.join(dirname, 'store', saveFolder)

    # make_archive does not create the parent directory itself; without
    # this the first call on a fresh checkout crashed.
    tempDir = os.path.join(dirname, 'temp')
    os.makedirs(tempDir, exist_ok=True)
    # uuid4 instead of uuid1: no MAC address / timestamp leaked into names.
    tempPath = os.path.join(tempDir, uuid.uuid4().hex)

    zipPath = shutil.make_archive(tempPath, 'zip', dataFolderPath)

    try:
        # Context manager closes the handle even if read() raises.
        with open(zipPath, mode="rb") as fd:
            return fd.read()
    finally:
        os.remove(zipPath)

def isSpiderRuning(saveFolder):
    """Return True while the spider job for *saveFolder* is registered
    (the job key is the MD5 digest of the folder name, as in start())."""
    jobKey = hashlib.md5(saveFolder.encode(encoding='UTF-8')).hexdigest()
    return ActionThreadSingle().containsKey(jobKey)
