import json
from time import time
from urllib.parse import urljoin
from queue import Queue
from threading import Thread

import requests
from parsel import Selector

from product_proxy import get_ip
from post_util import save_data

# Shared HTTP headers sent with every request the spider makes.
headers = {
    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4195.1 Safari/537.36"
}

# Load the target list (one entry per line) and feed it into a work queue.
with open('./new_list.csv', 'r', encoding='utf8') as source_file:
    datas = [line.strip() for line in source_file]

q = Queue()
for entry in datas:
    q.put(entry)


class Spider(Thread):
    """Scraper for the NHC medical-institution registry (zgcx.nhc.gov.cn).

    Searches institutions by name and province, solves the search captcha
    through an external recognition API, follows each result's detail page
    and persists the parsed record via ``save_data``.
    """

    # Two-character province-name prefix -> NHC province code.
    # Matched by substring; '11' (Beijing) is the fallback.
    PROV_CODES = {
        '北京': '11', '天津': '12', '河北': '13', '山西': '14', '内蒙': '15',
        '辽宁': '21', '吉林': '22', '黑龙': '23', '上海': '31', '江苏': '32',
        '浙江': '33', '安徽': '34', '福建': '35', '江西': '36', '山东': '37',
        '河南': '41', '湖北': '42', '湖南': '43', '广东': '44', '广西': '45',
        '海南': '46', '重庆': '50', '四川': '51', '贵州': '52', '云南': '53',
        '西藏': '54', '陕西': '61', '甘肃': '62', '青海': '63', '宁夏': '64',
        '新疆': '65',
    }

    def __init__(self, i=99):
        """Create a spider worker.

        :param i: numeric suffix for this worker's "not found" output file.
        """
        Thread.__init__(self)
        self.session = requests.Session()
        self.proxies = get_ip()
        self.not_find_save = './not_find{}.txt'.format(i)
        # Kept for compatibility with callers iterating all province codes.
        self.prov_li = [11, 12, 13, 14, 15, 21, 22, 23,
                        31, 32, 33, 34, 35, 36, 37, 41, 42, 43,
                        44, 45, 46, 50, 51, 52, 53, 54, 61, 62,
                        63, 64, 65]

    def request(self, url, method='get', params=None, data=None, json=None, verify=False, files=None, callback=None):
        """Issue an HTTP request through the rotating proxy, retrying on failure.

        NOTE(review): the ``json`` parameter shadows the module-level ``json``
        import inside this method; the name is kept so existing keyword
        callers keep working.

        :param callback: optional; when given it is invoked with the response
            and this method returns ``None``.
        :returns: the Response, ``None`` when ``callback`` was used, or
            ``False`` after 6 consecutive failures.
        """
        for _attempt in range(6):
            try:
                # Bug fix: the original passed files=None unconditionally,
                # silently discarding any upload handed to this method.
                response = self.session.request(url=url, method=method, params=params,
                                                json=json, data=data, files=files,
                                                headers=headers,
                                                proxies=self.proxies, verify=verify,
                                                timeout=(5, 5))
            except Exception:
                # Best-effort scraping: assume the proxy died, rotate, retry.
                self.proxies = get_ip()
                continue
            # Run the callback outside the try so a parsing error in it
            # does not trigger a pointless re-fetch with a fresh proxy.
            if callback:
                callback(response)
                return
            return response
        return False

    def get_cpa(self):
        """Download a captcha image and solve it via the recognition API.

        Loops until the recognizer returns a 6-character answer.

        :returns: the captcha solution as a string.
        """
        while True:
            cpa_url = 'http://zgcx.nhc.gov.cn:9090/CaptchaGenerate/Generate'
            resp = self.request(url=cpa_url)
            if not resp:
                # Bug fix: the original crashed on resp.content when the
                # captcha download exhausted its retries; just try again.
                continue
            api_url = "http://10.15.3.16:10011/api"
            answer = requests.post(url=api_url, files={'image': resp.content})
            if len(answer.text) == 6:
                return answer.text

    def get_prov(self, k):
        """Map a province name to its two-digit NHC code.

        :param k: province name, e.g. '云南省'; matched by two-char prefix.
        :returns: the code as a string; '11' when nothing matches.
        """
        for prefix, code in self.PROV_CODES.items():
            if prefix in k:
                return code
        return '11'

    def get_k(self):
        """Fetch the search page and extract its anti-forgery token.

        :returns: the ``__RequestVerificationToken`` value, or ``False``
            when the page could not be fetched.
        """
        url = 'http://zgcx.nhc.gov.cn:9090/unit'
        resp = self.request(url, method='get')
        if not resp:
            return False
        sel = Selector(resp.text)
        return sel.xpath('//input[@name="__RequestVerificationToken"]/@value').get()

    def start(self, name='', prov=''):
        """Search one institution and scrape every matching detail page.

        WARNING(review): this overrides ``threading.Thread.start``, so the
        class cannot be launched as a real thread; the name is kept for
        interface compatibility with existing callers.

        :param name: institution name to search for.
        :param prov: province name; mapped via :meth:`get_prov`.
        :returns: ``True`` on success or "no results"; ``False`` on request
            failure or a rejected captcha.
        """
        token = self.get_k()
        if not token:
            return False
        cpa = self.get_cpa()
        prov_code = self.get_prov(prov)
        data = {
            'Prov': prov_code,
            'Unit_Name': name.strip(),
            'Check_Code': cpa,
            '__RequestVerificationToken': token
        }
        post_url = 'http://zgcx.nhc.gov.cn:9090/unit'
        response = self.request(url=post_url, data=data, method='post')
        if not response:
            # Bug fix: the original dereferenced response.text even when
            # request() returned False after exhausting its retries.
            return False
        sel = Selector(response.text)
        hosps = sel.xpath('//table[@class="table table-bordered"]/tbody/tr/td/a[@class="a"]/@href').getall()
        if hosps:
            # Moved out of the loop: print the count once, not per row.
            print('len of details: ', len(hosps))
        for href in hosps:
            detail_url = urljoin(post_url, href)
            self.request(url=detail_url, method='get', callback=self.parse_detail)
        if '未查询到符合条件的医疗机构' in response.text:
            print('未查询到符合条件的医疗机构')
            return True
        if '验证码输入错误' in response.text:
            print('error captcha code')
            return False
        return True

    def parse_detail(self, response):
        """Parse one institution detail page and persist it via save_data.

        :param response: Response from request(); falsy values are ignored.
        """
        if not response:
            return
        sel = Selector(response.text)
        base = '/html/body/div[1]/div[2]/div[3]'
        item = dict()
        item['name'] = sel.xpath('/html/body/div[1]/div[2]/div[2]/h1/text()').get()
        item['shengfen'] = sel.xpath(base + '/div[1]/div[2]/text()').get()        # province
        item['shenpijiguan'] = sel.xpath(base + '/div[2]/div[2]/text()').get()    # approving authority
        item['dengjihao'] = sel.xpath(base + '/div[3]/div[2]/text()').get()       # registration number
        item['dizhi'] = sel.xpath(base + '/div[4]/div[2]/text()').get()           # address
        item['jibie'] = sel.xpath(base + '/div[6]/div[2]/text()').get()           # level
        item['faren'] = sel.xpath(base + '/div[7]/div[2]/text()').get()           # legal representative
        item['fuzeren'] = sel.xpath(base + '/div[8]/div[2]/text()').get()         # person in charge
        item['xukezheng'] = ''.join(sel.xpath(base + '/div[9]/div[2]/span/text()').getall())  # licences
        # Departments: collapse each entry's internal whitespace into '|'.
        kemus = sel.xpath(base + '/div[5]/div/text()').getall()
        item['kemu'] = ''.join('|'.join(one.split()) for one in kemus)
        print(item)
        post_data = {
            "data": item,
            "unique": item['dengjihao'],
            "content": {}
        }
        save_data(post_data)


if __name__ == '__main__':
    # Manual single-shot run: look up one facility by name in one province.
    # Example entry: 康益诊所(川力钰富苑东南)四川省
    spider = Spider(1)
    target_prov = '云南省'
    target_name = '大兴镇卫生院驻绿春县看守所卫生所'
    spider.start(name=target_name, prov=target_prov)
