# -*- coding: utf-8 -*-
import json

import scrapy

from doumiweb2.items import DoumiwebItem, CompanyItem, DataItem


# Deletion table built once at import time: newline, regular space, and
# non-breaking space (\xa0, common in scraped HTML) are all stripped out.
_DELETE_WS = str.maketrans('', '', '\n \xa0')


def str_without(txt):
    """Return *txt* with every newline, space, and non-breaking space removed.

    A final strip() also drops any other leading/trailing whitespace
    (tabs, carriage returns) that the deletion table does not cover.
    Uses a single C-level translate pass instead of chained .replace() calls.
    """
    return txt.translate(_DELETE_WS).strip()


class DoumitaskSpider(scrapy.Spider):
    """Spider for part-time job listings on doumi.com.

    Crawl chain per job:
        listing page (parse) -> job detail page (parse_task_item)
        -> company profile page (parse_company_item),
    accumulating everything into one DataItem yielded at the end.
    """
    name = 'doumitask'
    allowed_domains = ['doumi.com']
    base_url = 'http://doumi.com/'
    next_page = 'bj/'  # city path segment; 'bj' = Beijing

    start_urls = [
        base_url + 'bj/appsw/',
        # base_url + next_page
    ]

    def parse(self, response):
        """Parse a listing page: build a DoumiwebItem per job card and follow
        the job's detail link, carrying the partial item in request meta.

        Note: the ``.extract()[0]`` calls raise IndexError if the page layout
        changes; that is intentional fail-fast behavior for this spider.
        """
        c_url = self.base_url + self.next_page
        # self.next_page = response.xpath("//a[@class='next']/@href").extract()[0]
        for card in response.xpath("//div[@class='jzList-item clearfix']"):
            task_item = DoumiwebItem()

            content_url = card.xpath("./div[2]/div/h3/a/@href").extract()[0]
            title = card.xpath("./div[2]/div/h3/a/text()").extract()[0]
            working_time = card.xpath("./div[2]/ul/li[1]/span/text()").extract()[0]
            working_place = card.xpath("./div[2]/ul/li[3]/text()").extract()[0]
            work_type = card.xpath("./div[2]/ul/li[2]/text()").extract()[0]
            need_num = card.xpath("./div[2]/ul/li[4]/text()").extract()[0]
            settlement_method = card.xpath("./div[4]/span[2]/text()").extract()[0]
            # Salary unit text: keep the LAST text node of span[1] (or '' if
            # none) — the earlier nodes are layout noise on this page.
            salary_parts = card.xpath("./div[4]/span[1]/text()").extract()
            salary_unit = salary_parts[-1] if salary_parts else ''
            salary_value = card.xpath("./div[4]/span[1]/em/text()").extract()[0]

            task_item['c_url'] = c_url
            task_item['content_url'] = str_without(content_url)
            task_item['title'] = str_without(title)
            task_item['working_place'] = str_without(working_place)
            task_item['working_time'] = str_without(working_time)
            task_item['work_type'] = str_without(work_type)
            task_item['need_num'] = str_without(need_num)
            task_item['settlement_method'] = str_without(settlement_method)
            # Value first, then unit — e.g. "120" + "元/天".
            task_item['salary'] = str_without(salary_value + salary_unit)

            yield scrapy.Request(self.base_url + content_url,
                                 meta={'task': task_item},
                                 callback=self.parse_task_item)

        # yield scrapy.Request(self.base_url + self.next_page, callback=self.parse)

    def parse_task_item(self, response):
        """Parse a job detail page: add work addresses and the job description
        to the partial item, then follow the company profile link."""
        task_item = response.meta['task']

        # All work addresses listed in the expandable address box.
        working_place_details = [
            str_without(addr)
            for addr in response.xpath(
                "//div[@id='work-addr-open']/div[@class='jz-d-area']/text()"
            ).extract()
        ]
        company_url = response.xpath(
            "//div[@class='cpy-intro-link']/a/@href").extract()[0]
        # Description paragraphs are stored as a JSON-encoded list of strings.
        paragraphs = response.xpath("//div[@id='description-box']/p/text()").extract()
        task_item['details'] = json.dumps(paragraphs)
        task_item['working_place_details'] = working_place_details

        yield scrapy.Request(self.base_url + company_url,
                             meta={'task': task_item},
                             callback=self.parse_company_item)

    def parse_company_item(self, response):
        """Parse a company profile page and yield the completed DataItem
        combining the job item and the company item."""
        task_item = response.meta['task']
        company_item = CompanyItem()

        company_item['name'] = response.xpath(
            "//div[@class='company-title']/h2/text()").extract()
        company_item['logo'] = response.xpath(
            "//div[@class='company-image']/img/@src").extract()

        # Summary statistics strip: one <li><b> per metric, in page order.
        # NOTE: 'evalute_tate' is the field name declared in CompanyItem —
        # keep the misspelling to match the item definition.
        desc_fields = ('job_num', 'msg_reply', 'deal_rate', 'evalute_tate')
        for idx, field in enumerate(desc_fields, start=1):
            company_item[field] = response.xpath(
                "//ul[@class='company-desc clearfix']/li[%d]/b/text()" % idx
            ).extract()

        # Registration details list: one <li> per attribute, in page order.
        ident_fields = ('type', 'state', 'money', 'industry', 'birthday',
                        'address', 'address_dj', 'credit_no',
                        'organization_no', 'scope')
        for idx, field in enumerate(ident_fields, start=1):
            company_item[field] = response.xpath(
                "//ul[@class='ident-list']/li[%d]/text()" % idx
            ).extract()

        data_item = DataItem()
        data_item['task'] = task_item
        data_item['company'] = company_item
        # Use the spider's logger instead of print() so output honors the
        # configured Scrapy log level.
        self.logger.info('公司=== %s', company_item)
        return data_item
