import csv
import json
import re

import time
import traceback

from bs4 import BeautifulSoup

from common.request import seleniumutil
from config import config


def to_search_page(browser):
    """Activate the first tab (the search/list page) and enter its iframe."""
    to_main(browser)
    first_tab = "#tabs > div.tabs-header > div.tabs-wrap > ul > li.tabs-first"
    browser.find_element_by_css_selector(first_tab).click()
    browser.switch_to.frame("iframeRightId")
    switch_selected_frame(browser)


def to_search_page1(browser, ifr):
    """Switch the driver straight into the given iframe element, then pause briefly."""
    browser.switch_to.frame(ifr)
    time.sleep(0.1)


def to_detail_page(browser):
    """Activate the last tab (the customer-detail page) and enter its iframe."""
    to_main(browser)
    last_tab = "#tabs > div.tabs-header > div.tabs-wrap > ul > li.tabs-last"
    browser.find_element_by_css_selector(last_tab).click()
    browser.switch_to.frame("iframeRightId")
    switch_selected_frame(browser)


def to_main(browser):
    """Return to the top-level document, leaving any iframe the driver is in."""
    browser.switch_to.default_content()


def close_detail_page(browser):
    """Close the last tab (the detail page) via its tab-header close button."""
    to_main(browser)
    close_button = ("#tabs > div.tabs-header > div.tabs-wrap > ul"
                    " > li.tabs-last > a.tabs-close")
    browser.find_element_by_css_selector(close_button).click()


def ret_switch_select_frame(browser):
    """Return the <iframe> element inside the currently visible tab panel.

    Returns None when no panel is marked visible (the caller must handle it).
    """
    to_main(browser)
    # The visible panel is identified by its inline "display: block" style.
    visible_marker = '<div class="panel" style="display: block; width:'
    for panel in browser.find_elements_by_css_selector(".tabs-panels .panel"):
        if visible_marker in panel.get_attribute("outerHTML"):
            return panel.find_element_by_tag_name("iframe")
    return None


def switch_selected_frame(browser):
    """Switch the driver into the iframe of the currently visible tab panel.

    Fixed: the original kept iterating the panel list after switching into
    the iframe, so any further ``get_attribute`` call ran inside the new
    frame context and could fail; we now stop at the first visible panel.
    """
    to_main(browser)
    # The visible panel is identified by its inline "display: block" style.
    visible_marker = '<div class="panel" style="display: block; width:'
    for panel in browser.find_elements_by_css_selector(".tabs-panels .panel"):
        if visible_marker in panel.get_attribute("outerHTML"):
            iframe = panel.find_element_by_tag_name("iframe")
            browser.switch_to.frame(iframe)
            return


def get_text(soup, selector):
    """Return the text of the first element matching *selector* in *soup*.

    Returns "" when nothing matches.  On any other error returns the
    legacy marker string "获取失败" + message, which downstream code writes
    into the CSV as-is (kept for backward compatibility).

    ``nth-child`` is rewritten to ``nth-of-type`` — presumably because the
    recorded browser selectors do not match BeautifulSoup's nth-child
    handling on this page; confirm before removing.
    """
    try:
        selector = selector.replace("nth-child", "nth-of-type")
        # Fixed: the original timed this call with time.clock(), which was
        # removed in Python 3.8 and made every lookup raise AttributeError
        # (returning the error marker instead of the real text).
        matches = soup.select(selector)
        return matches[0].text if matches else ""
    except Exception as e:
        if "no such element" in str(e):
            return ""
        return "获取失败" + str(e)


# (field name, div index, li index or None, trailing selector part) for every
# value on the detail page.  NOTE: "汽车归属地" really uses .showdetailsname and
# "姓名"/"手机号" have extra child elements — these come from the live markup.
_DETAIL_FIELDS = [
    ("姓名", 1, 1, "div.showdetailsinfo > textarea"),
    ("手机号", 1, 2, "div.showdetailsinfo > span"),
    ("来源", 1, 3, "div.showdetailsinfo"),
    ("申请城市", 1, 4, "div.showdetailsinfo"),
    ("学历", 1, 5, "div.showdetailsinfo"),
    ("户籍", 1, 6, "div.showdetailsinfo"),
    ("年龄", 1, 7, "div.showdetailsinfo"),
    ("婚姻", 1, 8, "div.showdetailsinfo"),
    ("身份证", 1, 9, "div.showdetailsinfo"),
    ("性别", 1, 10, "div.showdetailsinfo"),
    ("毕业院校", 1, 11, "div.showdetailsinfo"),
    ("客户公司", 1, 12, "div.showdetailsinfo"),
    ("申请额度", 1, 13, "div.showdetailsinfo"),
    ("来源类型", 1, 14, "div.showdetailsinfo"),
    ("名下房产情况", 2, None, "div.showdetailsinfo"),
    ("名下车产情况", 3, 1, "div.showdetailsinfo"),
    ("汽车类型", 3, 2, "div.showdetailsinfo"),
    ("汽车归属地", 3, 3, "div.showdetailsname"),
    ("汽车使用年限", 3, 4, "div.showdetailsinfo"),
    ("汽车价值", 3, 5, "div.showdetailsinfo"),
    ("车险公司", 3, 6, "div.showdetailsinfo"),
    ("当前保单缴费次数", 3, 7, "div.showdetailsinfo"),
    ("车辆状态", 3, 8, "div.showdetailsinfo"),
    ("BD有无保单", 4, 1, "div.showdetailsinfo"),
    ("保单公司", 4, 2, "div.showdetailsinfo"),
    ("年缴费额", 4, 3, "div.showdetailsinfo"),
    ("保单状态", 4, 4, "div.showdetailsinfo"),
    ("已缴次数", 4, 5, "div.showdetailsinfo"),
    ("两年内最严重的逾期", 5, 1, "div.showdetailsinfo"),
    ("一年内最严重的逾期", 5, 2, "div.showdetailsinfo"),
    ("半年内最严重的逾期", 5, 3, "div.showdetailsinfo"),
    ("微粒贷额度", 5, 4, "div.showdetailsinfo"),
    ("芝麻信用分", 5, 5, "div.showdetailsinfo"),
    ("信用卡情况", 6, 1, "div.showdetailsinfo"),
    ("贷记卡总额总额度", 6, 2, "div.showdetailsinfo"),
    ("贷记卡当前使用额度", 6, 3, "div.showdetailsinfo"),
    ("贷记卡近半年使用额度", 6, 4, "div.showdetailsinfo"),
    ("单张贷记卡最高负债率", 6, 5, "div.showdetailsinfo"),
    ("有无信用贷款", 6, 6, "div.showdetailsinfo"),
]


def _detail_selector(div, li, tail):
    """Build the CSS selector for one detail-page field (li=None → single li)."""
    li_part = " > li" if li is None else " > li:nth-child(%d)" % li
    return "#infomation-info10 > div:nth-child(%d) > ul%s > %s" % (div, li_part, tail)


def do_detail_page(browser):
    """Scrape one open customer-detail tab into a dict keyed by field name.

    Clicks the "reveal phone number" element, polls until the number is
    populated, then reads every field plus star rating, status and notes.
    """
    phone_sel = _detail_selector(1, 2, "div.showdetailsinfo > span")
    # Reveal the phone number, then poll until it is no longer the
    # "点击显示号码" placeholder.
    browser.find_element_by_css_selector(phone_sel).click()
    soup = BeautifulSoup(browser.page_source, 'html.parser')
    for _ in range(10):
        time.sleep(0.002)
        # Fixed: re-parse the live page on every poll; the original parsed
        # the soup once and re-read the same stale document ten times, so
        # it could never observe the phone number appearing.
        soup = BeautifulSoup(browser.page_source, 'html.parser')
        if get_text(soup, phone_sel) != "点击显示号码":
            break

    userinfo = {}
    for key, div, li, tail in _DETAIL_FIELDS:
        userinfo[key] = get_text(soup, _detail_selector(div, li, tail))

    # Star rating and status are <select> widgets; read the selected option.
    userinfo["客户星级"] = browser.find_element_by_css_selector("#stars > option[selected=selected]").text
    userinfo["状态"] = browser.find_element_by_css_selector("#status > option[selected=selected]").text

    # Notes history: one <li> per remark, stored as the str() of the list.
    memos = [m.text for m in browser.find_elements_by_css_selector("#hisul > li")]
    userinfo["备注"] = str(memos)
    return userinfo


# Account currently being processed; set by login() and used by the export
# functions to name their CSV / error files.
USERNAME = ""


def login(browser, username, pwd):
    """Log *username* into the sudai9 admin site; return True on success.

    Side effect: stores *username* in the module-global USERNAME so the
    export functions can name their output files after the account.
    Returns False (after printing a notice) when the site reports a
    username/password mismatch.
    """
    global USERNAME
    USERNAME = username
    logout(browser)  # drop any previous session before logging in
    browser.get("https://www.sudai9.com/manage/index/indexlogin.html?brand=29")
    browser.find_element_by_css_selector("#kefu_number").send_keys(username)
    browser.find_element_by_css_selector("#smscode").clear()
    pwd_input = browser.find_element_by_css_selector("#kefu_password")
    pwd_input.clear()
    pwd_input.send_keys(pwd)
    # NOTE(review): the SMS code is hard-coded — presumably this form does
    # not actually validate it; confirm before relying on this.
    browser.find_element_by_css_selector("#smscode").send_keys("1234")
    browser.find_element_by_css_selector(
        "body > div.container-fluid.login_box > div.container > form > div:nth-child(4) > button").click()
    browser.implicitly_wait(5)
    if "账户和密码不匹配" in browser.page_source:
        print(username + "账号密码不匹配")
        return False
    return True


def logout(browser):
    """Best-effort logout by hitting the logout URL; errors are swallowed.

    A failed logout must not abort the export run, so only the exception
    type was narrowed from a bare ``except:`` to ``except Exception:``
    (a bare except would also swallow KeyboardInterrupt/SystemExit).
    """
    try:
        browser.get("https://www.sudai9.com/manage/index/indexlogout.html")
    except Exception:
        pass


def 我的客户(browser):
    """Export every customer under "我的客户" (my customers) to a desktop CSV.

    Walks the paginated table, opens each row's detail tab, scrapes it with
    do_detail_page() and appends one CSV row per customer.  A failure on a
    single row is logged to an error file and does not stop the export.
    """
    to_main(browser)
    browser.implicitly_wait(3)

    # Click the "我的客户" entry in the left-hand menu tree.
    for menu in browser.find_elements_by_css_selector("#tree > li >.tree-node > span.tree-title"):
        if "我的客户" in menu.text:
            menu.click()
            break
    search_iframe = ret_switch_select_frame(browser)

    data_csvfile = open(config.PATH_DESKTOP + "/我的客户%s.csv" % USERNAME, "a", newline='', encoding='utf-8')
    # Fixed: the error file previously lacked the "/" separator (landing
    # outside the desktop dir) and an explicit encoding (crashing on
    # Chinese text under non-UTF-8 locales).
    errorfile = open(config.PATH_DESKTOP + "/error%s.txt" % USERNAME, "w", encoding='utf-8')
    try:
        data_writer = csv.writer(data_csvfile)
        errorfile.write("开始了\n")
        headers = None

        for page in range(1, 1000):  # hard upper bound on pages, as before
            print("我的客户页码:" + str(page))
            to_search_page1(browser, search_iframe)
            trs = browser.find_elements_by_css_selector("#lk_content > table > tbody > tr")

            for tr in trs:
                try:
                    to_search_page1(browser, search_iframe)
                    # Only rows with an "Open" action are real customer rows.
                    if "Open" not in tr.get_attribute("innerHTML"):
                        continue
                    tr.find_elements_by_css_selector("td")[2].click()
                    to_detail_page(browser)
                    record = do_detail_page(browser)
                    if not headers:
                        # First record defines the CSV column order.
                        headers = record.keys()
                        data_writer.writerow(headers)
                    data_writer.writerow([record[h] for h in headers])
                    close_detail_page(browser)
                except Exception as e:
                    errorfile.write("----------------------------------------------\n")
                    errorfile.write(tr.text + "\n")
                    errorfile.write(str(e) + "\n")

            if browser.find_elements_by_css_selector("#pagination > a.next"):
                try:
                    browser.find_element_by_css_selector("#pagination > a.next").click()
                except Exception:
                    traceback.print_exc()
            else:
                break  # no "next" link: last page reached
            data_csvfile.flush()
    finally:
        # Fixed: files are now closed even when the export loop raises.
        data_csvfile.close()
        errorfile.close()


def 再分配客户(browser):
    """Export every customer under "再分配客户" (reassigned customers) to CSV.

    Same flow as 我的客户(): walk the paginated table, open each detail tab,
    scrape it and append one CSV row; per-row failures are logged to an
    error file and skipped.
    """
    to_main(browser)
    browser.implicitly_wait(3)

    # Click the "再分配客户" entry in the left-hand menu tree.
    for menu in browser.find_elements_by_css_selector("#tree > li >.tree-node > span.tree-title"):
        if "再分配客户" in menu.text:
            menu.click()
            break
    search_iframe = ret_switch_select_frame(browser)

    data_csvfile = open(config.PATH_DESKTOP + "/再分配客户%s.csv" % USERNAME, "a", newline='', encoding='utf-8')
    # Fixed: the error file previously lacked the "/" separator (landing
    # outside the desktop dir) and an explicit encoding (crashing on
    # Chinese text under non-UTF-8 locales).
    errorfile = open(config.PATH_DESKTOP + "/error%s.txt" % USERNAME, "w", encoding='utf-8')
    try:
        data_writer = csv.writer(data_csvfile)
        errorfile.write("开始了\n")
        headers = None

        for page in range(1, 1000):  # hard upper bound on pages, as before
            print("再分配客户页码:" + str(page))
            to_search_page1(browser, search_iframe)
            try:
                trs = browser.find_elements_by_css_selector("#lk_content > table > tbody > tr")
            except Exception:
                # One retry: the iframe occasionally goes stale right after
                # paging; give up on the whole export if it fails twice.
                try:
                    trs = browser.find_elements_by_css_selector("#lk_content > table > tbody > tr")
                except Exception:
                    break

            for tr in trs:
                try:
                    to_search_page1(browser, search_iframe)
                    # Only rows with an "Open" action are real customer rows.
                    if "Open" not in tr.get_attribute("innerHTML"):
                        continue
                    tr.find_elements_by_css_selector("td")[2].click()
                    to_detail_page(browser)
                    record = do_detail_page(browser)
                    if not headers:
                        # First record defines the CSV column order.
                        headers = record.keys()
                        data_writer.writerow(headers)
                    data_writer.writerow([record[h] for h in headers])
                    close_detail_page(browser)
                except Exception as e:
                    # Fixed: was traceback.print_stack(), which printed the
                    # current call stack instead of the exception traceback.
                    traceback.print_exc()
                    errorfile.write("----------------------------------------------\n")
                    errorfile.write(tr.text + "\n")
                    errorfile.write(str(e) + "\n")

            if browser.find_elements_by_css_selector("#pagination > a.next"):
                try:
                    browser.find_element_by_css_selector("#pagination > a.next").click()
                except Exception:
                    traceback.print_exc()
            else:
                break  # no "next" link: last page reached
            data_csvfile.flush()
    finally:
        # Fixed: files are now closed even when the export loop raises.
        data_csvfile.close()
        errorfile.close()


def execute(browser, username, pwd):
    """Log in as one account, export both customer lists, then log out.

    Each export is attempted twice because the site's frames are flaky;
    failures are printed and do not stop the remaining steps.  (Removed an
    unused ``url`` local from the original.)
    """
    print(username)
    if not login(browser, username, pwd):
        return

    def _run_twice(step):
        # Retry once on any failure; re-running is safe because the export
        # CSVs are opened in append mode.
        try:
            step(browser)
        except Exception:
            traceback.print_exc()
            try:
                step(browser)
            except Exception:
                traceback.print_exc()

    _run_twice(再分配客户)
    _run_twice(我的客户)

    try:
        logout(browser)
    except Exception:
        traceback.print_exc()


def eee2(users1):
    """Run the export for each [username, password] pair in a JSON string.

    *users1* is a JSON-encoded list of pairs.  One failing account does not
    stop the rest (fixed: bare ``except:`` narrowed to ``except Exception:``
    so Ctrl-C can still interrupt the run).
    """
    users = json.loads(users1)
    browser = seleniumutil.chrome_driver_clean()

    for u in users:
        try:
            logout(browser)
            execute(browser, u[0], u[1])
        except Exception:
            traceback.print_exc()


def e(users1):
    """Run the export for each [username, password] pair in *users1*.

    Uses the remote Chrome driver.  One failing account does not stop the
    rest (fixed: bare ``except:`` narrowed to ``except Exception:`` so
    Ctrl-C can still interrupt the run).
    """
    browser = seleniumutil.chrome_driver_remote()

    for u in users1:
        try:
            logout(browser)
            execute(browser, u[0], u[1])
        except Exception:
            traceback.print_exc()


if __name__ == "__main__":
    # Each entry is a [username, password] pair handed to execute() via e().
    # NOTE(review): production credentials are committed in plain text here;
    # they should be moved to a config file or environment variables.
    users2 = [
        ['13003230983', '13003230983'],
        ['18521782191', '18521782191'],
        ['13918479298', '13918479298'],
        ['13651825857', 'zhudaiwang168'],
        ['18602129102', 'zhudaiwang168'],
        ['13482033114', 'zhudaiwang168'],
        ['15900540276', 'zhidaiwang168'],
        ['13482311514', 'sudaiwang168'],
        ['18616056510', 'sudaiwang168'],
        ['15618967319', 'zhudaiwang168'],
        ['13248326806', 'zhudaiwang168'],
        ['18602126729', 'zhangcheng1234'],
        ['13636336402', 'zhudaiwang168'],
        ['13391018063', 'sudaiwang168']]

    e(users2)
