import scrapy, requests, json
from scrapy import Request, FormRequest
from fake_useragent import UserAgent
from xinzhuasearch.items import XinzhuasearchItem
from xinzhuasearch.untils.cities import cities, cities2
from xinzhuasearch.untils.utils_log import log_out, log
from xinzhuasearch import settings


class BaiduurlsSpider(scrapy.Spider):
    """Crawl Baidu search results city by city, resolve each hit to its
    main domain, enrich it with company data from icp.chinaz.com, then
    geocode the registered address through the Baidu Map API.

    Pipeline per result:
      Baidu SERP -> follow one redirect hop to the real site ->
      POST domain to icp.chinaz.com -> geocode address -> yield item.
    """

    name = 'baiduurls'
    allowed_domains = ['baidu.com', 'icp.chinaz.com', 'data.chinaz.com']
    # Seed page: Beijing + keyword "SEO", result page 0.
    start_urls = ['https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&ch=7&tn=02049043_8_pg&wd=北京市%20SEO&pn=00']
    # Template: first {} -> city name, second {} -> page index (pn is index*10).
    urls = ['https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&ch=7&tn=02049043_8_pg&wd={}%20SEO&pn={}0']
    # Alternate keyword variant (social-insurance agency) kept for quick switching:
    # urls = ['https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&ch=7&tn=02049043_8_pg&wd={}%20社保代缴&pn={}0']
    page = 0  # unused internally; kept for backward compatibility
    company_data_url = 'http://icp.chinaz.com/Home/QiYeData'
    ua = UserAgent()
    headers = {"User-Agent": ua.random}
    # Business-status text -> numeric code emitted downstream.
    status_list = {
        '开业': 10,  # open
        '在业': 10,  # operating
        '存续': 20,  # existing / surviving
        '吊销': 30,  # licence revoked
        '注销': 40,  # deregistered
        '迁入': 50,  # moved in
        '迁出': 60,  # moved out
        '停业': 70,  # suspended
        '清算': 80,  # in liquidation
    }
    # Results whose title contains any of these markers are junk
    # (video portals, Q&A, document hosting, microblog posts) and skipped.
    JUNK_TITLE_MARKERS = ('好看视频', '百度知道', '什么是', '百度文库', '腾讯视频', '微博正文')

    def _iter_results(self, response):
        """Yield (title, baidu_redirect_url, description) for each usable
        search result on a Baidu SERP, skipping junk titles."""
        for node in response.xpath("//div[@class='result c-container new-pmd']"):
            title = ''.join(node.xpath(".//h3/a").xpath('string(.)').extract())
            hrefs = node.xpath(".//h3/a/@href").extract()
            desc = ''.join(node.xpath(".//div[@class='c-abstract']/text()").extract())
            if any(marker in title for marker in self.JUNK_TITLE_MARKERS):
                continue
            if hrefs:  # no link -> nothing to resolve
                yield title, hrefs[0], desc

    @staticmethod
    def _main_domain(location):
        """Reduce a redirect target URL to its registrable domain.

        Keeps three labels when the second-to-last label is 'com'
        (e.g. 'www.foo.com.cn' -> 'foo.com.cn'), two labels otherwise
        ('www.foo.cn' -> 'foo.cn').
        """
        host = location.split('//')[1].split('/')[0]
        parts = host.split('.')
        if parts[-2] == 'com':
            return '.'.join(parts[-3:])
        return '.'.join(parts[-2:])

    def _resolve_result(self, baidu_url):
        """Follow one Baidu redirect hop synchronously and return the main
        domain, or None when the response is not a usable redirect.

        NOTE(review): the blocking requests.get inside a Scrapy callback
        stalls the reactor; kept because the original design relies on it.
        """
        resp = requests.get(baidu_url, allow_redirects=False, headers=self.headers)
        # Only 3xx responses carry a redirect; also guard the header itself.
        # (The original accepted 2xx too and crashed on a missing Location.)
        if 300 <= resp.status_code < 400 and 'Location' in resp.headers:
            log('main_url', resp.headers['Location'])
            return self._main_domain(resp.headers['Location'])
        return None

    def _company_request(self, item, main_url):
        """Build the POST request that looks up company data for a domain."""
        body = 'Kw={}'.format(main_url)
        log('data', body)
        return Request(
            url=self.company_data_url,
            method='POST',
            callback=self.company_details,
            body=body,
            meta={"item": item},
            headers={
                'Host': 'icp.chinaz.com',
                'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
                'User-Agent': self.ua.random,
            },
        )

    def parse(self, response):
        """Handle the seed page (Beijing / 'SEO'), then kick off the
        city-by-city pagination via parse_pages."""
        for title, b_url, desc in self._iter_results(response):
            main_url = self._resolve_result(b_url)
            if main_url is None:
                continue
            item = XinzhuasearchItem()
            item['title'] = title
            item['url'] = main_url
            item['desc'] = desc
            item['province'] = 110000  # Beijing province code
            item['city'] = 110100     # Beijing city code
            item['keyword'] = 'SEO'
            # item['keyword'] = ' 社保代缴'
            yield self._company_request(item, main_url)

        # dont_filter: this URL was already fetched as a start URL, so the
        # dupe filter would otherwise drop it and parse_pages never runs.
        yield scrapy.Request(url=self.start_urls[0], callback=self.parse_pages,
                             dont_filter=True)

    def parse_pages(self, response):
        """Walk every city in cities2() and schedule result pages 0-4
        (1-4 for Beijing, whose page 0 was the seed URL in parse)."""
        china = cities2()
        for province in china['data']:
            for city in province['children']:
                name = city['name']
                data = {
                    'province': city['pid'],
                    'city': city['id'],
                    'keyword': 'SEO',
                    # 'keyword': name + ' 社保代缴',
                }
                first_page = 1 if name == '北京市' else 0
                for p in range(first_page, 5):
                    yield Request(
                        url=self.urls[0].format(name, p),
                        callback=self.next_page,
                        meta={'data': data},
                    )

    def next_page(self, response):
        """Handle one paginated SERP for a specific city; the city codes
        and keyword travel in response.meta['data']."""
        data = response.meta['data']
        for title, b_url, desc in self._iter_results(response):
            main_url = self._resolve_result(b_url)
            if main_url is None:
                # The original proceeded with an empty URL here and crashed
                # with IndexError; skip non-redirect results instead.
                continue
            log('title', title)
            log('check_main_url', main_url)
            item = XinzhuasearchItem()
            item['title'] = title
            item['url'] = main_url
            item['desc'] = desc
            item['province'] = data['province']
            item['city'] = data['city']
            item['keyword'] = data['keyword']
            yield self._company_request(item, main_url)

    @staticmethod
    def _parse_capital(raw):
        """Normalize a registered-capital string to a number in yuan.

        Thousands separators are stripped first, so '1,000万...' parses to
        10000000.0 instead of crashing; values containing '-' (ranges or
        unknown) map to ''.
        """
        raw = raw.replace(',', '')
        if '-' in raw:
            return ''
        if '万' in raw:
            # '万' = ten thousand; everything after it (currency text) is dropped.
            return float(raw.split('万')[0]) * 10000
        return float(raw)

    def company_details(self, response):
        """Merge icp.chinaz company data into the item, then geocode its
        registered address through the Baidu Map API."""
        data = json.loads(response.body.decode())
        log('company_details', data)
        info = data['data']
        if info is None:
            return  # no company record for this domain
        item = response.meta["item"]
        model = info['businessInfoModel']
        item["name"] = info['companyName']
        item["artificial_person"] = model['corporation']
        item["registered_capital"] = self._parse_capital(model['registeredCapital'])
        item["founded_at"] = model['registrationTime']
        item["tel"] = ""
        item["email"] = ""
        item["sort"] = "1"
        item["id_no"] = model['dutyParagraph']
        item["industry"] = model['industry']
        item["registered_address"] = model['registeredAddress']
        item["address"] = model['registeredAddress']
        item["business_scope"] = model['businessScope']
        item["type"] = model['type']
        # Map the Chinese status text to its numeric code; '' when unknown.
        item["status"] = self.status_list.get(model['state'], '')
        item["approval_date"] = model['approvalDate']
        url = 'http://api.map.baidu.com/geocoding/v3/?address={}&output=json&ak={}'.format(
            item['registered_address'], settings.ak)
        yield Request(url=url, callback=self.baidu_api, meta={"item": item})

    def baidu_api(self, response):
        """Attach lng/lat from the geocoding response and emit the item."""
        item = response.meta["item"]
        data = json.loads(response.text)
        log('baidu_api_data', data)
        if data['status'] == 0:  # 0 = geocoding success
            location = data['result']['location']
            item["lng"] = location['lng']
            item["lat"] = location['lat']
        else:
            item["lng"] = ''
            item["lat"] = ''
        log('baidu item', item)
        return item
