# -*- coding: utf-8 -*-
import time
import json

from scrapy import Request
from scrapy.selector import Selector

from amac_spider.utils.my_redis_spider import RedisSpiderPro
from amac_spider.items import (FundManagerListItem, FundManagerDetailItem, FundManagerDetailManagementItem,
                               FundMangerMemberInfoItem, FundManagerDetailManagementWorkExperienceItem,
                               FundManagerRelatedPartyInfoItem, FundManagerPromoterInfoItem,
                               FundManagerImplementBeforeItem, FundManagerImplementAfterItem,
                               FundManagerInvestAdviserProductItem, FundManagerOrgHonestyInfoItem,
                               LegalOpinionStatusItem)
from amac_spider.utils.handler import handle_url, get_url, get_date, get_datetime, handle_string, get_unit, get_md5


# Spider for the AMAC private fund manager directory (私募基金管理人名录):
# crawls the JSON manager list and each manager's HTML detail page.
class FundManagerSpider(RedisSpiderPro):
    name = 'fund_manager'

    def parse(self, response):
        """Parse one page of the manager list (a JSON API response).

        Yields one FundManagerListItem per manager, plus a Request for the
        manager's HTML detail page handled by :meth:`parse_detail`.
        """
        info_list = json.loads(response.text)['content']
        mappings = {'manager_id': 'id', 'manager_name': 'managerName', 'artificial_name': 'artificialPersonName',
                    'org_type': 'primaryInvestType', 'register_addr': 'officeProvince', 'record_code': 'registerNo',
                    'found_time': 'establishDate', 'record_time': 'registerDate'}
        for info in info_list:
            # BUGFIX: build a fresh item per row. The original reused a single
            # item instance, so items still queued in the pipeline were mutated
            # to the last row's values before being processed.
            list_item = FundManagerListItem()
            for k, v in mappings.items():
                list_item[k] = info[v]

            detail_url = handle_url(get_url(self.name), list_item['manager_id'])
            list_item['detail_url'] = detail_url
            # Timestamps arrive as epoch milliseconds and may be missing (None).
            if list_item['found_time'] is not None:
                list_item['found_time'] = get_date(list_item['found_time'] / 1000)
            if list_item['record_time'] is not None:
                list_item['record_time'] = get_date(list_item['record_time'] / 1000)
            list_item['type'] = 'list'
            list_item['crawl_time'] = get_datetime(time.time())
            yield list_item
            yield Request(url=detail_url, callback=self.parse_detail, dont_filter=True,
                          meta={'manager_id': list_item['manager_id']})

    def parse_detail(self, response):
        """Parse one manager's HTML detail page.

        Yields, in order: membership info, legal-opinion status, per-executive
        work-experience rows and executive info, related-party rows, promoter
        (investor) rows, product rows, org honesty info, and finally the main
        detail item.
        """
        detail_item = FundManagerDetailItem()
        selector = Selector(response)
        org_info_mappings = {'基金管理人全称(英文)': 'english_manager_name', '登记编号': 'record_code',
                             '组织机构代码': 'org_code', '登记时间': 'record_time', '成立时间': 'found_time',
                             '注册地址': 'register_addr', '办公地址': 'office_addr', '注册资本': 'registered_capital',
                             '实缴资本': 'pay_capital', '比例': 'scale', '企业性质': 'enterprise_character',
                             '机构类型': 'org_type', '业务类型': 'professional_work_type', '全职员工人数': 'full_time_person_num',
                             '取得基金从业人数': 'acquire_fund_person_num', '机构网址': 'org_site'}
        manager_id = response.meta.get('manager_id')
        detail_url = response.url

        # --- Organisation info: rows 2..17 of the info table are label/value pairs.
        detail_item['chinese_manager_name'] = selector.css('#complaint2::text').extract_first()
        manager_name = detail_item['chinese_manager_name']
        for i in range(2, 18):
            row = selector.xpath("//*[@class='info-body']/div[2]/div[2]//tr[{}]".format(i))
            title = row.xpath('./td[1]//text()').extract_first()
            # BUGFIX: skip missing rows; `k in title` raised TypeError on None.
            if title is None:
                continue
            if handle_string(title) == "注册资本实缴比例":
                title = title.replace('注册资本实缴', '')
            for k, v in org_info_mappings.items():
                if k in title:
                    detail_item[v] = handle_string(row.xpath('./td[2]/text()').extract_first())
                    # Capital labels embed the unit (e.g. 万元); append it to the value.
                    if k in ('注册资本', '实缴资本'):
                        detail_item[v] = detail_item[v] + get_unit(title)
                    break
        last_row = selector.xpath("//*[@class='info-body']/div[2]/div[2]//tr[last()]")
        if "第三方机构" in handle_string(last_row.xpath('./td[1]//text()').extract_first()):
            detail_item['provide_investment_advice_tolly'] = handle_string(last_row.xpath('./td[2]//text()').extract_first())

        # --- Membership info (div[3]): flatten the tds, read label/value pairs.
        member_info_item = FundMangerMemberInfoItem()
        member_info_mappings = {'是否为会员': 'is_member', '会员代表': 'member_name',
                                '当前会员类型': 'member_type', '入会时间': 'add_member_time'}
        member_td_list = []
        for tr in selector.xpath("//*[@class='info-body']/div[3]//tr"):
            member_td_list.extend(tr.xpath('./td'))
        for td_index in range(0, len(member_td_list), 2):
            label = member_td_list[td_index].xpath('./text()').extract_first()
            for k, v in member_info_mappings.items():
                if k == label:
                    member_info_item[v] = member_td_list[td_index + 1].xpath('./text()').extract_first()
        member_info_item['manager_id'] = manager_id
        member_info_item['manager_name'] = manager_name
        member_info_item['detail_url'] = response.url
        member_info_item['type'] = 'member_info'
        member_info_item['crawl_time'] = get_datetime(time.time())
        yield member_info_item

        # --- Legal-opinion-letter status (div[4]).
        legal_item = LegalOpinionStatusItem()
        legal_mappings = {"法律意见书状态": "legal_opinion_status", "律师事务所名称": "lawfirm_name",
                          "律师姓名": "lawyer_name"}
        legal_td_list = []
        for tr in response.xpath("//*[@class='info-body']/div[4]//tr"):
            legal_td_list.extend(tr.xpath('.//td'))
        for td_index in range(0, len(legal_td_list), 2):
            label = handle_string(legal_td_list[td_index].xpath('.//text()').extract_first())
            for k, v in legal_mappings.items():
                if label == k:
                    legal_item[v] = handle_string(legal_td_list[td_index + 1].xpath('.//text()').extract_first())
        legal_item['manager_id'] = manager_id
        legal_item['manager_name'] = manager_name
        legal_item['detail_url'] = response.url
        legal_item['type'] = 'legal_opinion_status'
        legal_item['crawl_time'] = get_datetime(time.time())
        yield legal_item

        # --- Actual controller info (div[5]).
        reality_texts = response.xpath("//*[@class='info-body']/div[5]//td//text()").extract()
        # BUGFIX: guard against an empty/short section (IndexError before).
        if len(reality_texts) >= 2 and handle_string(reality_texts[0]) == "实际控制人信息":
            detail_item['reality_control_person'] = handle_string(reality_texts[1])

        # --- Senior-management info (div[6]): rows come in groups of three per
        # executive -- two rows of label/value td pairs, then a row holding the
        # work-experience sub-table.
        management_info_mappings = {'职务': 'position', '姓名': 'name',
                                    '是否有基金从业资格': 'have_fund_obtain_qualification',
                                    '资格获取方式': 'get_qualification_fashion'}
        tr_list = selector.xpath("//*[@class='info-body']/div[6]/div[2]/table/tbody/tr")
        for index in range(0, len(tr_list), 3):
            # BUGFIX: fresh item per executive; the original reused one mutated
            # instance for every executive on the page.
            management_item = FundManagerDetailManagementItem()
            management_item['manager_id'] = manager_id
            management_item['manager_name'] = manager_name
            management_item['detail_url'] = response.url
            management_item['type'] = 'management_info'
            management_item['crawl_time'] = get_datetime(time.time())
            for row in (tr_list[index], tr_list[index + 1]):
                pair_td_list = row.xpath(".//td")
                for i in range(0, len(pair_td_list), 2):
                    label = handle_string(pair_td_list[i].xpath("./text()").extract_first())
                    for k, v in management_info_mappings.items():
                        if label == k:
                            management_item[v] = handle_string(pair_td_list[i + 1].xpath("./text()").extract_first())

            # Work-experience rows for this executive (row 0 is the header).
            work_experience_tr_list = tr_list[index + 2].xpath("./td[2]//tr")
            for work_index in range(1, len(work_experience_tr_list)):
                # BUGFIX: fresh item per work-experience row (same reuse bug).
                work_item = FundManagerDetailManagementWorkExperienceItem()
                work_td_list = work_experience_tr_list[work_index].xpath(".//td")
                work_item['name'] = management_item['name']
                work_item['time'] = handle_string(work_td_list[0].xpath('./text()').extract_first())
                work_item['company_name'] = handle_string(work_td_list[1].xpath('./text()').extract_first())
                work_item['department'] = handle_string(work_td_list[2].xpath('./text()').extract_first())
                work_item['position'] = handle_string(work_td_list[3].xpath('./text()').extract_first())
                work_item['manager_id'] = manager_id
                work_item['manager_name'] = manager_name
                work_item['detail_url'] = detail_url
                # Dedup key over every identifying field of the row.
                work_item['unique_md5'] = get_md5(work_item['manager_id'],
                                                  work_item['name'],
                                                  work_item['time'],
                                                  work_item['company_name'],
                                                  work_item['department'],
                                                  work_item['position'])
                work_item['type'] = 'management_work_experience'
                work_item['crawl_time'] = get_datetime(time.time())
                yield work_item
            yield management_item

        # --- Related-party info (div[7]); row 0 is the header.
        related_tr_list = selector.xpath("//*[@class='info-body']/div[7]//table[@class='list-table']//tr")
        for index in range(1, len(related_tr_list)):
            # BUGFIX: fresh item per row (same reuse bug as above).
            related_item = FundManagerRelatedPartyInfoItem()
            related_td_list = related_tr_list[index].xpath(".//td")
            related_href = related_td_list[2].xpath("./a/@href").extract_first()
            related_item["r_type"] = related_td_list[1].xpath(".//text()").extract_first()
            related_item["related_name"] = related_td_list[2].xpath(".//text()").extract_first()
            related_item["related_detail_url"] = get_url(self.name) + "/{}".format(related_href)
            related_item["related_id"] = related_href.replace('.html', '')
            related_item["record_code"] = related_td_list[3].xpath(".//text()").extract_first()
            related_item["org_code"] = related_td_list[4].xpath(".//text()").extract_first()
            related_item['manager_id'] = manager_id
            related_item['manager_name'] = manager_name
            related_item['detail_url'] = detail_url
            related_item['type'] = 'related_party_info'
            related_item['crawl_time'] = get_datetime(time.time())
            yield related_item

        # --- Promoter/investor info (div[8]); row 0 is the header.
        promoter_tr_list = selector.xpath("//*[@class='info-body']/div[8]//table[@class='list-table']//tr")
        for index in range(1, len(promoter_tr_list)):
            # BUGFIX: fresh item per row (same reuse bug as above).
            promoter_item = FundManagerPromoterInfoItem()
            promoter_td_list = promoter_tr_list[index].xpath(".//td")
            promoter_item["name"] = promoter_td_list[1].xpath(".//text()").extract_first()
            promoter_item["scale"] = promoter_td_list[2].xpath(".//text()").extract_first()
            promoter_item['manager_id'] = manager_id
            promoter_item['manager_name'] = manager_name
            promoter_item['detail_url'] = detail_url
            promoter_item['type'] = 'promoter_info'
            promoter_item['crawl_time'] = get_datetime(time.time())
            yield promoter_item

        # --- Product info (div[9]): three sub-tables, one per product category.
        # BUGFIX: map section name -> item class explicitly instead of eval(),
        # and build a fresh item per product row (same reuse bug as above).
        product_sections = [('implement_before_found_fund', FundManagerImplementBeforeItem),
                            ('implement_after_found_fund', FundManagerImplementAfterItem),
                            ('invest_adviser_product', FundManagerInvestAdviserProductItem)]
        product_info_tr_list = selector.xpath("//*[@class='info-body']/div[9]/div[2]/table/tbody/tr")
        for index, (type_name, item_cls) in enumerate(product_sections):
            # Guard against a page with fewer sub-tables than expected.
            if index >= len(product_info_tr_list):
                break
            section_tr_list = product_info_tr_list[index].xpath(".//tr")
            for tr_index in range(1, len(section_tr_list)):
                product_item = item_cls()
                product_td_list = section_tr_list[tr_index].xpath(".//td")
                company_href = product_td_list[0].xpath("./a/@href").extract_first()
                product_item['company_name'] = handle_string(product_td_list[0].xpath(".//text()").extract_first())
                product_item['company_detail_url'] = get_url(self.name).rsplit("/", 1)[0] + company_href.replace('..', '')
                product_item['company_id'] = company_href.rsplit("/", 1)[-1].replace('.html', '')
                product_item['current_month_report'] = handle_string(",".join(product_td_list[1].xpath(".//text()").extract()))
                product_item['quarter_report'] = handle_string(",".join(product_td_list[2].xpath(".//text()").extract()))
                product_item['half_a_year_report'] = handle_string(",".join(product_td_list[3].xpath(".//text()").extract()))
                product_item['year_report'] = handle_string(",".join(product_td_list[4].xpath(".//text()").extract()))
                product_item['manager_id'] = manager_id
                product_item['manager_name'] = manager_name
                product_item['detail_url'] = detail_url
                product_item['type'] = type_name
                product_item['crawl_time'] = get_datetime(time.time())
                yield product_item

        # --- Honesty/credit info (div[10] plus the summary table in div[1]).
        org_honesty_info = FundManagerOrgHonestyInfoItem()
        org_honesty_info['honesty_info'] = {}
        org_honesty_info_mappings = {'机构信息最后更新时间': 'org_info_end_update_time', '特别提示信息': 'prompt_info',
                                     '机构诚信信息': 'honesty_info'}
        honesty_td_list = selector.xpath("//*[@class='info-body']/div[10]//td")
        for index in range(0, len(honesty_td_list), 2):
            label = handle_string(honesty_td_list[index].xpath(".//text()").extract_first())
            for k, v in org_honesty_info_mappings.items():
                if label == k:
                    org_honesty_info[v] = handle_string(honesty_td_list[index + 1].xpath('.//text()').extract_first())
        top_td_list = selector.xpath("//*[@class='info-body']/div[1]/div/table/tbody/tr/td")
        # BUGFIX: guard an empty summary table (IndexError on top_td_list[0]).
        if len(top_td_list) >= 2:
            top_label = handle_string(top_td_list[0].xpath(".//text()").extract_first())
            if top_label in org_honesty_info_mappings:
                for top_tr in top_td_list[1].xpath(".//tr"):
                    keys = handle_string(top_tr.xpath("./td[1]//text()").extract_first())
                    values = top_tr.xpath("./td[2]//text()").extract()
                    # BUGFIX: the old single-value branch passed the raw LIST to
                    # handle_string(); joining covers both cases correctly.
                    org_honesty_info['honesty_info'][keys] = handle_string(''.join(values))
        org_honesty_info['manager_id'] = manager_id
        org_honesty_info['manager_name'] = manager_name
        org_honesty_info['detail_url'] = detail_url
        org_honesty_info['type'] = 'org_honesty_info'
        org_honesty_info['crawl_time'] = get_datetime(time.time())
        yield org_honesty_info

        # --- Outer fields on the main detail item.
        detail_item['manager_id'] = manager_id
        detail_item['detail_url'] = detail_url
        detail_item['type'] = 'detail'
        detail_item['crawl_time'] = get_datetime(time.time())

        yield detail_item
