#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#-- anth bluedolphi ---
import threading
import time
from queue import Empty, Queue

import requests
from requests.structures import CaseInsensitiveDict
from tqdm import tqdm

from util.HttpUtil.BaseHttpUtil import BaseHttpUtil
from util.XlsUtil import loadXlsGetSheel
from util.imageCode import VerificationCode

# Lock guarding concurrent mutation of the shared result list below.
lock = threading.Lock()
# Rows accumulated by all crawler threads; guarded by `lock` and cleared
# at the start of each crawl (see AIYZBaseSpider.getRowsFromUrl).
resultDatas=[]

class BaseAIYZThread(threading.Thread):
    """Thread base class carrying a shared HTTP session and JSON helpers.

    NOTE(review): ``http`` is a class attribute, so every instance shares one
    session (and its cookies) — confirm the sharing is intentional.
    """

    # One requests session shared by all instances of this class.
    http = requests.session()

    def __init__(self):
        # Bug fix: Thread.__init__ was never called, so start() would raise
        # "thread.__init__() not called".
        super(BaseAIYZThread, self).__init__()

    def getJsonByUrlWithPost(self, url, data):
        """POST *data* to *url*; return the parsed JSON when the envelope's
        ``code`` is 1, otherwise None.
        """
        # Bug fix: previously posted to self.url (never defined) instead of
        # the url argument.
        rep = self.http.post(url, data=data)
        json1 = rep.json()
        if json1['code'] == 1:
            return json1
        return None

    def getJsonByUrlWithGet(self, url):
        """GET *url*; return the parsed JSON when ``code`` is 1, else None."""
        rep = self.http.get(url)
        rep.encoding = 'utf-8'
        json1 = rep.json()
        if json1['code'] == 1:
            return json1
        return None

def getJsonValue(obj, defValue):
    """Return *obj*, falling back to *defValue* only when *obj* is None.

    Falsy-but-not-None values (0, '', [], False) are returned unchanged.
    """
    if obj is None:
        return defValue
    return obj


class Thread_get_rows_Crawl(threading.Thread):
    """Worker thread that drains page URLs from a shared queue, fetches each
    page's JSON, and appends its rows to the module-level ``resultDatas``.
    """

    def __init__(self, threadName, page_url_Queue, headers):
        """
        :param threadName: display name used in start/stop log messages
        :param page_url_Queue: Queue of fully-built page URLs to crawl
        :param headers: HTTP headers sent with every GET request
        """
        super(Thread_get_rows_Crawl, self).__init__()
        # Thread name (for logging only).
        self.threadName = threadName
        # Queue of page URLs still to be fetched.
        self.page_url_Queue = page_url_Queue
        self.headers = headers

    def getJsonByUrlWithGet(self, url):
        """GET *url* with this worker's headers; return the parsed JSON when
        the envelope's ``code`` is 1, otherwise None.
        """
        rep = requests.get(url, headers=self.headers)
        rep.encoding = 'utf-8'
        json1 = rep.json()
        if json1['code'] == 1:
            return json1
        return None

    def run(self):
        print("启动 " + self.threadName)
        while True:
            try:
                # Non-blocking get: raises queue.Empty once the queue is
                # drained, which is this worker's normal termination signal.
                url = self.page_url_Queue.get(False)
            except Empty:
                break
            try:
                print(url)
                json1 = self.getJsonByUrlWithGet(url)
                print(json1)
                time.sleep(1)  # throttle requests to the server
                if json1 is not None:
                    # Guard the shared result list; `with` guarantees the
                    # lock is released even if extend() raises (the old
                    # manual acquire/release did not).
                    with lock:
                        if json1['data'] is not None:
                            if type(json1['data']) is list:
                                resultDatas.extend(json1['data'])
                            else:
                                resultDatas.extend(json1['data']['rows'])
            except Exception as e:
                # Preserve the original fail-fast behaviour (any request or
                # parse error ends this worker), but stop hiding the cause.
                print("采集线程异常退出:", e)
                break

        print("结束 " + self.threadName)

# NOTE(review): `global` at module level is a no-op; CRAWL_EXIT is defined
# here but nothing in this chunk reads it — possibly a leftover exit flag.
global CRAWL_EXIT
CRAWL_EXIT = False

class AIYZBaseSpider(BaseHttpUtil):
    """Spider base class for paged JSON APIs using a {"code", "data", "rows"}
    envelope.

    Relies on BaseHttpUtil (defined outside this chunk) for ``self.get`` /
    ``self.post`` / ``self.saveBase64Image`` and the default ``self.header``.
    NOTE(review): ``self.randcodeUrl`` (used by getRandCode) is assumed to be
    set by a subclass — confirm against the concrete spiders.
    """

    def getTotalPage(self, url, pageSize):
        """Fetch *url* and return the total page count at *pageSize* rows/page."""
        rep = self.get(url, headers=self.header)
        rep.encoding = 'utf-8'
        return self.getTotalPageByJson(rep.json(), pageSize)

    def getTotalPageByJson(self, json1, pageSize):
        """Return the page count encoded in response envelope *json1*.

        A None response, a null ``data`` or a list-valued ``data`` (an
        un-paged result) all count as a single page.
        """
        totalPage = 1
        # Robustness: getJsonByUrlWithGet returns None on failure.
        if json1 is None:
            return totalPage
        if json1['code'] == 1:
            if json1['data'] is None or type(json1['data']) is list:
                # Bug fix: the old code computed len(None) when data was null.
                return totalPage
            totalRows = int(json1['data']['total'])
            # Ceiling division: a partial last page still counts as a page.
            totalPage = int(totalRows / pageSize) if totalRows % pageSize < 1 else int(totalRows / pageSize) + 1
        return totalPage

    def saveListToXls(self, saveFileName, titles, list, sheelName):
        """Write *list* (a sequence of dict rows) to sheet *sheelName* of the
        workbook *saveFileName*.

        :param titles: ordered mapping item-key -> column title; its key order
            defines the column order.
        NOTE: the parameter name ``list`` shadows the builtin but is kept for
        backward compatibility with keyword callers.
        """
        wb1, sheet = loadXlsGetSheel(saveFileName, sheelName)

        # Header row (row 1), columns in title order.
        row = 1
        for column, key in enumerate(titles.keys(), start=1):
            sheet.cell(row=row, column=column, value=titles[key])

        # One data row per item, same column order as the header.
        row += 1
        for item in list:
            for column, key in enumerate(titles.keys(), start=1):
                sheet.cell(row=row, column=column, value=item[key])
            row += 1

        wb1.save(saveFileName)
        print("OK")

    def getTotalRowsAndTotalPageByJson(self, json1, pageSize):
        """Return ``(totalRows, totalPage)`` derived from envelope *json1*."""
        totalPage, totalRows = 1, 1

        if json1['code'] == 1:
            if json1['data'] is None or type(json1['data']) is list:
                # Bug fix: len(None) used to raise when data was null.
                totalRows = 0 if json1['data'] is None else len(json1['data'])
                return totalRows, totalPage

            totalRows = int(json1['data']['total'])
            totalPage = int(totalRows / pageSize) if totalRows % pageSize < 1 else int(totalRows / pageSize) + 1

        return totalRows, totalPage

    def getTotalRowsByJson(self, json1, pageSize):
        """Return ``data.total`` from *json1*, or 0 when the request failed
        or ``data`` is not a dict.

        *pageSize* is unused but kept for interface compatibility.
        """
        totalRows = 0
        if json1['code'] == 1 and type(json1['data']) is dict:
            totalRows = int(json1['data']['total'])
        return totalRows

    def getTotalPagesFromUrl(self, url, pageSize, headers=None):
        """Fetch *url* (with optional *headers*) and return its page count."""
        # Robustness: also covers pageSize == 0, which previously led to a
        # ZeroDivisionError in the paging math.
        if pageSize < 1:
            pageSize = 30
        json1 = self.getJsonByUrlWithGet(url, headers)
        return self.getTotalPageByJson(json1, pageSize)

    def getTotolRowsFromUrl(self, baseUrl):
        """Probe *baseUrl* with page=1&size=1 and return the server-reported
        total row count (``data.total`` or ``data.pageTotal``), or 0 on failure.
        """
        url = "%s&page=%s&size=%s&t=%s" % (baseUrl, 1, 1, int(time.time()))
        print(url)
        json1 = self.getJsonByUrlWithGet(url)

        # Bug fix: the old guard used `and`, so a None response slipped
        # through and crashed on the membership tests below.
        if json1 is None or json1['code'] != 1:
            return 0
        data = json1.get("data")
        if data:
            if 'total' in data:
                return data['total']
            if 'pageTotal' in data:
                return data['pageTotal']
        return 0

    def create_get_rows_urls(self, baseUrl, pageSize=20):
        """Build and return a Queue of page URLs covering every page of
        *baseUrl* at *pageSize* rows per page.

        URLs are enqueued from the last page down to page 1, preserving the
        original crawl order.
        """
        url = "%s&page=%s&size=%s&t=%s" % (baseUrl, 1, pageSize, int(time.time()))
        json1 = self.getJsonByUrlWithGet(url)

        totalPages = self.getTotalPageByJson(json1, pageSize)
        print("totalPages = ", totalPages)
        pbar = tqdm(total=totalPages, desc="生成采集链接进度")
        page_urls_Queue = Queue(totalPages)
        for page in range(totalPages):
            url = "%s&page=%s&size=%s&t=%s" % (
                baseUrl, totalPages - page, pageSize, int(time.time()))
            print(url)
            page_urls_Queue.put(url)
            pbar.update(1)
        pbar.close()
        return page_urls_Queue

    def getFristRowFromUrl(self, baseUrl, page=1, pageSize=1):
        """Fetch one page and return ``data.rows``, or None when the response
        carries no "rows" key.
        """
        url = "%s&page=%s&size=%s&t=%s" % (
            baseUrl, page, pageSize, int(time.time()))
        print(url)
        rep = self.get(url, headers=self.header)

        # Cheap text probe before paying for a full JSON parse.
        if '"rows":' in rep.text:
            return rep.json()["data"]["rows"]
        return None

    def getRowsFromUrlByNoThread(self, baseUrl, page=1, pageSize=100):
        """Single-threaded fetch of one page's rows (timestamp in ms).

        Returns ``data.rows`` or a list-valued ``data``, else None.
        """
        url = "%s&page=%s&size=%s&t=%s" % (
            baseUrl, page, pageSize, int(time.time() * 1000))
        print(url)
        rep = self.get(url, headers=self.header)

        if '"rows":' in rep.text:
            return rep.json()["data"]["rows"]
        elif '"data":[{' in rep.text:
            return rep.json()["data"]
        return None

    def getDataFromUrlByNoThread(self, baseUrl, page=1, pageSize=100):
        """Same as getRowsFromUrlByNoThread but with a seconds-resolution
        timestamp; kept as a separate method for backward compatibility.
        """
        url = "%s&page=%s&size=%s&t=%s" % (
            baseUrl, page, pageSize, int(time.time()))
        print(url)
        rep = self.get(url, headers=self.header)

        if '"rows":' in rep.text:
            return rep.json()["data"]["rows"]
        elif '"data":[{' in rep.text:
            return rep.json()["data"]
        return None

    # 通用取得结果 Rows
    def getRowsFromUrl(self, baseUrl, pageSize=20):
        """Crawl every page of *baseUrl* with worker threads and return all
        collected rows.

        :param baseUrl: base URL without paging parameters
        :return: the shared module-level ``resultDatas`` list (cleared first)
        """
        # Build the queue of page URLs to crawl.
        page_url_Queue = self.create_get_rows_urls(baseUrl, pageSize)
        print("队列长度：", page_url_Queue.qsize())

        # At most 7 workers (loop breaks once i exceeds 5) and never more
        # workers than pages.
        crawlList = []
        print("resultDatas:", resultDatas)
        resultDatas.clear()
        for i in range(page_url_Queue.qsize()):
            crawlList.append("采集线程%s号" % (i + 1))
            if i > 5:
                break
        print(crawlList)

        threadcrawl = []
        for threadName in crawlList:
            thread = Thread_get_rows_Crawl(threadName, page_url_Queue, self.header)
            thread.start()
            # Bug fix: the old code join()ed right here inside the start
            # loop, serializing the workers and defeating the threading.
            threadcrawl.append(thread)

        # Wait for every worker to finish draining the queue.
        for t in threadcrawl:
            t.join()

        print("所有线程任务完成")
        print("resultDatas:", resultDatas)
        return resultDatas

    def getRandCode(self, host):
        """Fetch a captcha image from ``self.randcodeUrl``, OCR it, and
        return ``(token, code)``; returns None when the request fails.
        """
        url = "%s?t=%s" % (self.randcodeUrl, int(time.time()))
        headers = {
            'authority': host,
            'accept': "*/*",
            'x-requested-with': "XMLHttpRequest",
            'user-agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36",
            'sec-fetch-site': "same-origin",
            'sec-fetch-mode': "cors",
            'referer': "https://%s/login.html" % (host),
            'accept-encoding': "gzip, deflate, br",
            'accept-language': "zh-CN,zh;q=0.9",
            'cache-control': "no-cache",
        }
        rep = self.get(url, headers=headers)
        json1 = rep.json()
        if json1['code'] == 1:
            # Strip the data-URI prefix, save the PNG, then OCR it.
            image = str(json1['data']['image']).replace("data:image/png;base64,", "")
            randCodeImagePath = "code.png"
            self.saveBase64Image(image, fileName=randCodeImagePath)
            verificationCode = VerificationCode().getVerificationCode(randCodeImagePath)
            return json1['data']['token'], verificationCode
        return None

    def getJsonByUrlWithPost(self, url, data, header=None):
        """POST *data* to *url* (default headers: ``self.header``); return the
        parsed JSON when ``code`` is 1, otherwise None.
        """
        if header is None:
            header = self.header
        rep = self.post(url, data=data, headers=header)
        json1 = rep.json()
        if json1['code'] == 1:
            return json1
        return None

    def getJsonByUrlWithGet(self, url, header=None):
        """GET *url* (default headers: ``self.header``); return the parsed
        JSON when ``code`` is 1, otherwise None.
        """
        if header is None:
            header = self.header
        rep = self.get(url, headers=header)
        rep.encoding = 'utf-8'
        json1 = rep.json()
        if json1['code'] == 1:
            return json1
        return None
