#!/usr/bin/python3
import sys
import time
import random
import re
import requests
from functools import reduce
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from bs4 import BeautifulSoup
try:
    import Image
except ImportError:
    from PIL import Image
import pytesseract

from log import init_global_log, info, warn, debug, error

SPIDER_LOG = "selenium_spider.log"

URL_SCREENSHOT_PATH = "/var/tmp/screenshot.png"
VERIFY_PIC_PATH = "/var/tmp/%s"

def download_image_file(imgUrl):
    """Download a captcha/verification image to a local temp file.

    A random ``j`` query parameter is appended as a cache-buster so the
    server serves a fresh image instead of a cached one.

    Args:
        imgUrl: absolute URL of the image to fetch.

    Returns:
        (img_path, new_pic_url): local path the image was written to and
        the cache-busted URL actually requested.

    Raises:
        requests.HTTPError: on a non-2xx response (previously the error
        body was silently written to disk as if it were the image).
    """
    new_pic_url = imgUrl + '?j=%f' % (random.random())
    debug(new_pic_url)
    img_path = VERIFY_PIC_PATH % ('test.png')
    # Use the response as a context manager so the streamed connection is
    # always released, even on error.
    with requests.get(new_pic_url, stream=True) as r:
        debug(r.status_code)
        r.raise_for_status()
        with open(img_path, 'wb') as fp:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:
                    fp.write(chunk)
    return img_path, new_pic_url

def verify_ocr_code(img_path):
    """Run Tesseract OCR on the image at *img_path* and return the text."""
    captcha_image = Image.open(img_path)
    return pytesseract.image_to_string(captcha_image)

def get_basic_insured_information(headers, cookies):
    """Fetch the insured person's basic-info page and parse it to a dict.

    The page lays labels out in <th> cells and values in matching <td>
    cells; they are paired with zip(), and parsing only happens when the
    two lists have equal length.

    Args:
        headers: base HTTP headers; the Referer entry is set in place.
        cookies: session cookies (JSESSIONID) captured from the browser.

    Returns:
        dict mapping cleaned label text -> value text; {} on failure.
    """
    url = "http://insurance.cdhrss.gov.cn/QueryInsuranceInfo.do?flag=1"
    headers["Referer"] = "http://insurance.cdhrss.gov.cn/insurance/main.jsp"
    html = get_social_security_information_common(url, headers, cookies)
    debug(html)
    if not html:
        # The fetch failed after all retries; avoid BeautifulSoup(None).
        return {}
    soup = BeautifulSoup(html, "lxml")
    th_list = soup.find_all("th")
    td_list = soup.find_all("td")
    basic_insured_info = {}
    if len(th_list) == len(td_list):
        for th, td in zip(th_list, td_list):
            # Strip non-breaking spaces (U+00A0, previously spelled as
            # b"\xc2\xa0".decode("utf-8")), plain spaces and the
            # full-width colon from the label text.
            key = th.text.replace("\u00a0", "").replace(" ", "").replace("：", "").strip()
            basic_insured_info[key] = td.text.strip()
    return basic_insured_info

def get_endowment_insurance_pay_information(headers, cookies):
    """Fetch and parse the endowment (pension) insurance payment history.

    Each payment row carries three currency cells, in order: payment
    base, company contribution, personal contribution.  Currency cells
    are identified by a leading full-width Yuan sign.

    Args:
        headers: base HTTP headers; the Referer entry is set in place.
        cookies: session cookies (JSESSIONID) captured from the browser.

    Returns:
        dict with keys:
            pay_detail: list of {basic_pay_number, company_pay, personal_pay}
            company_pay_total / personal_pay_total: column sums (0.0 when
            no rows were parsed).
    """
    url = "http://insurance.cdhrss.gov.cn/QueryInsuranceInfo.do?flag=3"
    headers["Referer"] = "http://insurance.cdhrss.gov.cn/QueryInsuranceInfo.do?flag=1"
    html = get_social_security_information_common(url, headers, cookies)
    debug(html)
    endowment_info = {"pay_detail": [], "company_pay_total": 0.0, "personal_pay_total": 0.0}
    if not html:
        # Fetch failed after all retries; return the empty skeleton
        # instead of crashing in BeautifulSoup(None).
        return endowment_info
    soup = BeautifulSoup(html, "lxml")
    td_list = soup.find_all("td", text=re.compile("^￥"))
    pay_detail = {}
    for i, td in enumerate(td_list):
        pay_price = float(td.text.replace("￥", "").replace(",", ""))
        c = i % 3
        if c == 0:
            # payment base
            pay_detail['basic_pay_number'] = pay_price
        elif c == 1 and pay_detail:
            # company contribution
            pay_detail['company_pay'] = pay_price
        elif c == 2 and pay_detail:
            # personal contribution completes one row
            pay_detail['personal_pay'] = pay_price
            endowment_info["pay_detail"].append(pay_detail)
            pay_detail = {}
    # sum() with a 0.0 start tolerates an empty detail list, where the
    # original reduce() raised TypeError on an empty iterator.
    endowment_info["company_pay_total"] = sum((e["company_pay"] for e in endowment_info["pay_detail"]), 0.0)
    endowment_info["personal_pay_total"] = sum((e["personal_pay"] for e in endowment_info["pay_detail"]), 0.0)
    return endowment_info

def get_medical_insurance_pay_information(headers, cookies):
    """Fetch and parse the medical insurance payment history.

    Each payment row carries four currency cells, in order: payment base,
    company contribution, personal contribution, amount credited to the
    personal account.  Currency cells are identified by a leading
    full-width Yuan sign.

    Args:
        headers: base HTTP headers; the Referer entry is set in place.
        cookies: session cookies (JSESSIONID) captured from the browser.

    Returns:
        dict with keys:
            pay_detail: list of {basic_pay_number, company_pay,
                personal_pay, amount_of_pay}
            company_pay_total / personal_pay_total / amount_of_pay:
                column sums (0.0 when no rows were parsed).
    """
    url = "http://insurance.cdhrss.gov.cn/QueryInsuranceInfo.do?flag=5"
    headers["Referer"] = "http://insurance.cdhrss.gov.cn/QueryInsuranceInfo.do?flag=1"
    html = get_social_security_information_common(url, headers, cookies)
    debug(html)
    medical_insurance_info = {"pay_detail": [], "company_pay_total": 0.0,
                              "personal_pay_total": 0.0, "amount_of_pay": 0.0}
    if not html:
        # Fetch failed after all retries; return the empty skeleton
        # instead of crashing in BeautifulSoup(None).
        return medical_insurance_info
    soup = BeautifulSoup(html, "lxml")
    td_list = soup.find_all("td", text=re.compile("^￥"))
    pay_detail = {}
    for i, td in enumerate(td_list):
        pay_price = float(td.text.replace("￥", "").replace(",", ""))
        c = i % 4
        if c == 0:
            # payment base
            pay_detail['basic_pay_number'] = pay_price
        elif c == 1 and pay_detail:
            # company contribution
            pay_detail['company_pay'] = pay_price
        elif c == 2 and pay_detail:
            # personal contribution
            pay_detail['personal_pay'] = pay_price
        elif c == 3 and pay_detail:
            # credited amount completes one row
            pay_detail['amount_of_pay'] = pay_price
            medical_insurance_info["pay_detail"].append(pay_detail)
            pay_detail = {}

    # sum() with a 0.0 start tolerates an empty detail list, where the
    # original reduce() raised TypeError on an empty iterator.
    detail = medical_insurance_info["pay_detail"]
    medical_insurance_info["company_pay_total"] = sum((e["company_pay"] for e in detail), 0.0)
    medical_insurance_info["personal_pay_total"] = sum((e["personal_pay"] for e in detail), 0.0)
    medical_insurance_info["amount_of_pay"] = sum((e["amount_of_pay"] for e in detail), 0.0)
    return medical_insurance_info

def get_medical_insurance_consumption_information(headers, cookies):
    """Fetch and parse the medical insurance consumption (spending) page.

    Every currency cell (leading full-width Yuan sign) is one consumption
    amount; there is a single column, so no row grouping is needed.

    Args:
        headers: base HTTP headers; the Referer entry is set in place.
        cookies: session cookies (JSESSIONID) captured from the browser.

    Returns:
        dict with keys:
            pay_detail: list of {consumption_amount}
            total_consumption_amount: sum of all amounts (0.0 when no
            rows were parsed).
    """
    url = "http://insurance.cdhrss.gov.cn/QueryInsuranceInfo.do?flag=18"
    headers["Referer"] = "http://insurance.cdhrss.gov.cn/QueryInsuranceInfo.do?flag=1"
    html = get_social_security_information_common(url, headers, cookies)
    debug(html)
    medical_consumption_info = {"pay_detail": [], "total_consumption_amount": 0.0}
    if not html:
        # Fetch failed after all retries; return the empty skeleton
        # instead of crashing in BeautifulSoup(None).
        return medical_consumption_info
    soup = BeautifulSoup(html, "lxml")
    td_list = soup.find_all("td", text=re.compile("^￥"))
    for td in td_list:
        pay_price = float(td.text.replace("￥", "").replace(",", ""))
        medical_consumption_info["pay_detail"].append({'consumption_amount': pay_price})

    # sum() with a 0.0 start tolerates an empty detail list, where the
    # original reduce() raised TypeError on an empty iterator.
    medical_consumption_info["total_consumption_amount"] = \
        sum((e["consumption_amount"] for e in medical_consumption_info["pay_detail"]), 0.0)
    return medical_consumption_info

def get_social_security_information_common(url, headers, cookies, try_count=3):
    """GET *url* with retries and return the HTML body, or None.

    A 200 response whose body contains the site's Chinese "no data found"
    marker is treated as a failure and retried like any other error.

    Args:
        url: query endpoint to fetch.
        headers: HTTP headers passed straight to requests.get.
        cookies: session cookies passed straight to requests.get.
        try_count: maximum number of attempts (default 3).

    Returns:
        The response text on success, otherwise None.
    """
    ret = None
    for t in range(try_count):
        try:
            res = requests.get(url, headers=headers, cookies=cookies)
            if res.status_code == 200:
                ret = res.text
                if "没有查询出相关数据" in ret:
                    warn("Failed to get available data, will try it again.")
                    # Discard the useless body; previously a final failed
                    # attempt leaked the "no data" page to the caller.
                    ret = None
                    continue
                break
            else:
                error("Failed to get information, will try it again.")
        except Exception as ex:
            error(ex)
    return ret

def get_input_elems(driver):
    """Return all <input> elements of the first .input-box under #div2.

    Equivalent DOM walk:
        document.getElementById("div2")
            .getElementsByClassName("input-box")[0]
            .getElementsByTagName("input")
    """
    first_box = driver.find_element_by_id("div2") \
                      .find_elements_by_class_name("input-box")[0]
    return first_box.find_elements_by_tag_name("input")

def get_qrcode_img(driver):
    """Return the captcha <img> element on the login form.

    Equivalent DOM walk:
        document.getElementById("div2").getElementsByClassName("input-box")[0]
            .getElementsByClassName("qrcode")[0].getElementsByTagName("img")[0]
    """
    first_box = driver.find_element_by_id("div2") \
                      .find_elements_by_class_name("input-box")[0]
    qrcode_div = first_box.find_elements_by_class_name("qrcode")[0]
    return qrcode_div.find_elements_by_tag_name("img")[0]

def get_login_btn(driver):
    """Return the login button: the first .btn element under #div2."""
    return driver.find_element_by_id("div2") \
                 .find_elements_by_class_name("btn")[0]

def wait_to_page_load_complete(driver):
    """Block until the login form's captcha image is visible.

    Waits up to 60 seconds for the captcha <img> inside #div2 to become
    visible; on timeout (or any other WebDriver error) the browser is
    shut down and the process exits with status 1.
    """
    try:
        locator = (By.XPATH, '//div[@id="div2"]//div[@class="qrcode"]/img')
        WebDriverWait(driver, 60).until(EC.visibility_of_element_located(locator))
    except Exception as ex:
        error(ex)
        # quit() (not close()) also terminates the geckodriver process;
        # close() only closes the window and leaked the driver on exit.
        driver.quit()
        sys.exit(1)


if __name__ == '__main__':
    init_global_log(SPIDER_LOG)
    # TODO: credentials are intentionally blank; fill in before running.
    user = ""
    pwd = ""
    driver = webdriver.Firefox()
    driver.get("http://jypt.cdhrss.gov.cn:8048/portal.php?id=1")
    wait_to_page_load_complete(driver)
    # The captcha is cropped out of a full-page screenshot further below.
    driver.save_screenshot(URL_SCREENSHOT_PATH)
    time.sleep(2)
    input_elems = get_input_elems(driver)
    # Input order on the page: [0]=username, [1]=password, [2]=captcha code.
    elem_user = input_elems[0]
    elem_user.clear()
    elem_user.send_keys(user)
    time.sleep(2)
    elem_pwd = input_elems[1]
    elem_pwd.clear()
    elem_pwd.send_keys(pwd)
    time.sleep(2)
    # Crop the captcha image out of the screenshot using the element's
    # on-page location and size, then OCR it.
    elem_ver_pic = get_qrcode_img(driver)
    location = elem_ver_pic.location
    size = elem_ver_pic.size
    rangle = (int(location['x']), int(location['y']),
              int(location['x'] + size['width']), int(location['y'] + size['height']))
    screen_shot = Image.open(URL_SCREENSHOT_PATH)
    frame_verify_pic = screen_shot.crop(rangle)
    img_l = VERIFY_PIC_PATH % ('test.tif')
    frame_verify_pic.save(img_l)
    res = verify_ocr_code(img_l)
    # Tesseract output usually carries trailing whitespace/newlines which
    # made the original isdigit() check fail; strip before validating.
    res = res.strip() if res else ""
    # Single pre-formatted message: the original passed two positional
    # args ("verify code: ", res) which breaks logging-style %-formatting
    # when the message has no placeholder (NOTE(review): log module's
    # signature not visible here — confirm).
    info("verify code: %s" % res)
    if res.isdigit() and len(res) >= 3:
        time.sleep(2)
        elem_ver_code = input_elems[2]
        elem_ver_code.clear()
        elem_ver_code.send_keys(res)
        time.sleep(2)
        elem_login_btn = get_login_btn(driver)
        elem_login_btn.click()
        time.sleep(3)
        # Reuse the browser's logged-in session cookie for the scraping
        # requests made with the requests library.
        jsessionid_cookie = driver.get_cookie('JSESSIONID')
        cookies = {'JSESSIONID': jsessionid_cookie['value']}
        common_headers = {
            "Accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "Accept-Encoding" : "gzip, deflate",
            "Accept-Language" : "en-US,en;q=0.5",
            "Connection" : "keep-alive",
            "Host" : "insurance.cdhrss.gov.cn",
            # Must be a string: requests rejects non-str/bytes header
            # values (the original int 1 raised InvalidHeader).
            "Upgrade-Insecure-Requests" : "1",
            "User-Agent" : "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:53.0) Gecko/20100101 Firefox/53.0"
        }
        user_base_info = get_basic_insured_information(common_headers, cookies)
        endowment_info = get_endowment_insurance_pay_information(common_headers, cookies)
        medical_insurance_info = get_medical_insurance_pay_information(common_headers, cookies)
        medical_consumption_info = get_medical_insurance_consumption_information(common_headers, cookies)
        info("""姓名:%s 养老保险公司缴费总数:%f 养老保险个人缴费总数:%f 医疗保险公司缴费总数:%f \
            医疗保险个人缴费总数:%f 医疗保险累计发放金额:%f 医疗保险累计消费金额:%f""" % \
            (user_base_info["姓名"], endowment_info["company_pay_total"], \
                endowment_info["personal_pay_total"], medical_insurance_info["company_pay_total"], \
                medical_insurance_info["personal_pay_total"], medical_insurance_info["amount_of_pay"], \
                medical_consumption_info["total_consumption_amount"]))
    else:
        error("Not a valid verify code!")
        # Keep the failed captcha around for offline OCR debugging.
        img_l_f = VERIFY_PIC_PATH % ('test_verify_fail_%d.tif' % (time.time()))
        frame_verify_pic.save(img_l_f)
    # quit() (not close()) also terminates the geckodriver process.
    driver.quit()
