# -*- coding: utf-8 -*-
# @Time    : 2019/12/23 8:38
# @Author  : ZSQ
# @Email   : zsq199170918@163.com
# @FileName: sfpm.py
# @Software: PyCharm
import scrapy

import re
import json
import random
import string
import logging
import demjson
import datetime
import urllib.parse
from lxml import etree
from redis import StrictRedis
from copy import copy, deepcopy
from SFPM_spider.items import SfItem
from SFPM_spider.utils.redis_pool import FILTER_POOL as POOL
from SFPM_spider.utils.transfer_time import datetime_to_timestamp
from SFPM_spider.constants import SPM_PROVINCE_DICT, SPM_CITY_DICT, SPM_DISTRICT_DICT, CATEGORY_DICT, CASE_STATUS_DICT

logger = logging.getLogger(__name__)


class SfpmSpider(scrapy.Spider):
    """Spider for Taobao judicial-auction real-estate listings (sf.taobao.com).

    Starts from four housing-category list pages, drills down
    province -> city -> district -> case list -> case detail, then follows the
    detail page's notice/instruction/description/attachment/confirm/bid-record
    sub-requests before yielding a complete ``SfItem``.
    """
    name = 'sfpm'
    allowed_domains = ['sf.taobao.com', 'sf-item.taobao.com', 'desc.alicdn.com', 'susong-item.taobao.com', ]
    start_urls = [
        # Residential housing
        'https://sf.taobao.com/item_list.htm?spm=a213w.7398504.filter.2.{}&category=50025969&auction_source=0&sorder=2&st_param=-1&auction_start_seg=-1',
        # Commercial housing
        'https://sf.taobao.com/item_list.htm?spm=a213w.7398504.filter.3.{}&category=200782003&auction_source=0&sorder=2&st_param=-1&auction_start_seg=-1',
        # Industrial housing
        'https://sf.taobao.com/item_list.htm?spm=a213w.7398504.filter.4.{}&category=200788003&auction_source=0&sorder=2&st_param=-1&auction_start_seg=-1',
        # Other housing
        'https://sf.taobao.com/item_list.htm?spm=a213w.7398504.filter.5.{}&category=200798003&auction_source=0&sorder=2&st_param=-1&auction_start_seg=-1',
    ]
    base_url = 'https://sf.taobao.com/item_list.htm?spm=a213w.7398504.filter.{}.{}&'  # template for filter-page urls
    base_case_url = 'https://sf-item.taobao.com/sf_item/{}.htm?spm=a213w.7398504.paiList.{}.{}'  # template for case detail urls
    base_page_url = 'https://sf.taobao.com/item_list.htm?location_code={}&category={}&auction_source=0&sorder=2&st_param=-1&auction_start_seg=-1&page={}'  # template for district-level list pagination
    base_page_url_2 = 'https://sf.taobao.com/item_list.htm?category={}&auction_source=0&city={}&sorder=2&st_param=-1&auction_start_seg=-1&page={}'  # template for city-wide list pagination
    base_download_attach_url = 'https://sf.taobao.com/download_attach.do?attach_id={}'  # template for attachment download urls
    base_image_url = 'https://img.alicdn.com'  # base for case image urls
    base_confirm_url = 'https://sf.taobao.com/json/getSfDealConfirm.do?itemId={}'  # template for deal-confirmation urls
    base_records_page_url = 'https://sf-item.taobao.com/json/get_bid_records.htm?id={}&records_type=pageRecords&currentPage={}'  # template for bid-record pagination urls
    def start_requests(self):
        """Kick off one request per housing-category start URL.

        Reads the configured province/city filters once and passes them in
        ``meta`` so ``parse`` can decide which regions to follow.
        """
        sfpm_province_str, sfpm_city_dict = self._get_province_and_city()
        agent = random.choice(self.settings['USER_AGENTS'])
        headers = {
            'User-Agent': agent,
            'referer': 'https://sf.taobao.com/item_list.htm?spm=a213w.7398504.filter.60.{}&auction_source=0&sorder=2&st_param=-1&auction_start_seg=-1'.format(
                self._get_random_str()),
        }
        start_urls = [i.format(self._get_random_str()) for i in self.start_urls]
        for index, start_url in enumerate(start_urls):
            yield scrapy.Request(
                start_url,
                headers=headers,
                # ``index`` is an immutable int -- the original deepcopy was a no-op.
                meta=dict(index=index, sfpm_province_str=sfpm_province_str, sfpm_city_dict=sfpm_city_dict),
                priority=1,
            )

    def parse(self, response):
        """Parse a category start page: extract the province/municipality list.

        Follows each province allowed by the SFPM_PROVINCE setting (or all of
        them when '全国' is configured).

        :param response: category list page; ``meta`` carries the category
            index and the configured province/city filters.
        """
        index = copy(response.meta['index'])
        sfpm_province_str = copy(response.meta['sfpm_province_str'])
        sfpm_city_dict = copy(response.meta['sfpm_city_dict'])
        province_li = response.xpath("//div[text()='标的物所在地']/following-sibling::div/ul/li")
        if len(province_li):
            for i, province in enumerate(province_li):
                province_url = province.xpath("./em/a/@href").extract_first()
                province_name = province.xpath("./em/a/text()").extract_first()
                try:
                    spm_4 = SPM_PROVINCE_DICT.get(province_name)
                    province_url = self._deal_url(province_url, spm_4)
                except Exception:
                    # use the module logger (not the root logger) for consistency
                    # with the rest of the spider; bare ``except:`` narrowed.
                    logger.error('{}({})  省/直辖市url构造出错'.format(province_name, i + 1))
                else:
                    if sfpm_province_str and ('全国' in sfpm_province_str or province_name in sfpm_province_str):
                        item = SfItem()
                        item['province'] = province_name
                        item['categoryName'] = self._get_category(self.start_urls[index])
                        headers = dict(referer=self.start_urls[index].format(self._get_random_str()))
                        yield scrapy.Request(
                            province_url,
                            headers=headers,
                            callback=self.parse_city_li,
                            meta=dict(item=deepcopy(item), sfpm_city_dict=sfpm_city_dict),
                            priority=2,
                        )
        else:
            logger.error('{}  获取省/直辖市列表获取失败'.format(response.request.url))

    def parse_city_li(self, response):
        """Parse a province page: extract the city list.

        When SFPM_CITY configures cities for this province only those are
        followed; otherwise every city is followed.

        :param response: province page; ``meta`` carries the item built so
            far plus the configured city filter dict.
        """
        item = copy(response.meta['item'])
        sfpm_city_dict = copy(response.meta['sfpm_city_dict'])
        city_li = response.xpath("//li[@class='triggle J_FilterCity']")
        sfpm_city_li = sfpm_city_dict.get(item['province'], list())
        if len(city_li):
            for i, city in enumerate(city_li):
                item['city'] = city.xpath(".//a/text()").extract_first()
                city_url = city.xpath(".//a/@href").extract_first()
                try:
                    spm_4 = SPM_CITY_DICT[item['province']] + i
                    city_url = self._deal_url(city_url, spm_4)
                except Exception:
                    # module logger instead of root logging; bare except narrowed
                    logger.error(
                        '{}-{}-{}({})  城市url构造出错'.format(item['categoryName'], item['province'], item['city'], i + 1))
                else:
                    headers = self._deal_spm_headers(response.request.url)
                    # Follow the city when no filter is configured or it is
                    # whitelisted -- the two original branches emitted the
                    # identical request and were mutually exclusive.
                    if not sfpm_city_li or item['city'] in sfpm_city_li:
                        yield scrapy.Request(
                            city_url,
                            headers=headers,
                            callback=self.parse_district_li,
                            meta=dict(item=deepcopy(item)),
                            priority=3,
                        )
        else:
            logger.error('{}-{} 城市列表获取失败'.format(item['categoryName'], item['province']))

    def parse_district_li(self, response):
        """Parse a city page: extract the district list and request each
        district's case list; fall back to the city-wide listing when the
        page exposes no districts.

        ``flag`` in the emitted meta tells ``parse_case_li`` which pagination
        url template applies (True = district url, False = city-wide url).
        """
        item = copy(response.meta['item'])
        district_li = response.xpath("//div[@class='sub-condition J_SubCondition  small-subcondion']/ul/li")
        if len(district_li):
            for i, district in enumerate(district_li):
                item['districtName'] = district.xpath(".//a/text()").extract_first()
                district_url = district.xpath(".//a/@href").extract_first()
                try:
                    spm_4 = SPM_DISTRICT_DICT[item['province']] + i
                    district_url = self._deal_url(district_url, spm_4)
                except Exception:
                    # module logger instead of root logging; bare except narrowed
                    logger.error(
                        '{}-{}-{}-{}({})  行政区域/区县url构造出错'.format(item['categoryName'], item['province'], item['city'],
                                                                 item['districtName'], i + 1))
                else:
                    headers = self._deal_spm_headers(response.request.url)
                    yield scrapy.Request(
                        district_url,
                        headers=headers,
                        callback=self.parse_case_li,
                        meta=dict(item=deepcopy(item), page=1, flag=True, ),
                        priority=4,
                    )
        else:
            logger.warning('{}-{}-{} 获取行政区域/区县列表失败'.format(item['categoryName'], item['province'], item['city']))
            # no districts on the page: crawl the whole city as one "district"
            item['districtName'] = response.xpath(
                "//div[@class='sub-condition J_SubCondition  small-subcondion']/div/a/text()").extract_first()
            district_url = response.xpath(
                "//div[@class='sub-condition J_SubCondition  small-subcondion']/div/a/@href").extract_first()
            try:
                spm_4 = SPM_DISTRICT_DICT[item['province']] - 1
                district_url = self._deal_url(district_url, spm_4)
            except Exception:
                logger.error(
                    '{}-{}-{}-{}  行政区域/区县url构造出错'.format(item['categoryName'], item['province'], item['city'],
                                                         item['districtName']))
            else:
                headers = self._deal_spm_headers(response.request.url)
                yield scrapy.Request(
                    district_url,
                    headers=headers,
                    callback=self.parse_case_li,
                    meta=dict(item=deepcopy(item), page=1, flag=False, ),
                    priority=4,
                )

    def parse_case_li(self, response):
        """Parse one page of the case list.

        Emits a detail request for every case id not yet recorded in redis
        and, when more pages remain, a request for the next list page.

        :param response: list page; ``meta`` carries ``item``, ``page``
            (1-based) and ``flag`` (True = district-level url, False =
            city-wide url -- selects the pagination template).
        """
        item = copy(response.meta['item'])
        page = response.meta['page']  # int: copy/deepcopy of it is a no-op
        flag = response.meta['flag']
        total_num_str = response.xpath("//h1/text()").extract_first()
        data_str = response.xpath("//script[@id='sf-item-list-data']/text()").extract_first()
        try:
            total_num = int(self._get_digit(total_num_str))
            # 32 cases per list page; round up
            total_page = total_num // 32 if not total_num % 32 else total_num // 32 + 1
            data_dict = json.loads(data_str)
            data_li = data_dict['data']
        except Exception as e:
            logger.error(
                '{}-{}-{}-{} 第{}页 获取案例列表失败  error_msg:{}'.format(item['categoryName'], item['province'], item['city'],
                                                                 item['districtName'], page, e))
        else:
            if total_num and data_li:
                redis_conn = StrictRedis(connection_pool=POOL)
                for i, data in enumerate(data_li):
                    item['caseId'] = data['id']
                    item['caseTitle'] = data['title']
                    item['caseStatus'] = self._get_status(data['sellOff'], data['status'])
                    item['currentPrice'] = data['currentPrice']
                    item['consultPrice'] = data['consultPrice']
                    item['marketPrice'] = data['marketPrice']
                    item['viewerCount'] = data['viewerCount']
                    item['bidCount'] = data['bidCount']
                    item['delayCount'] = data['delayCount']
                    item['applyCount'] = data['applyCount']
                    item['startTime'] = self.transfer_date(data['start'])
                    item['endTime'] = self.transfer_date(data['end'])
                    # SADD returns 1 only for unseen ids -> incremental crawl
                    if redis_conn.sadd('sfpm_case_id', item['caseId']):
                        case_url = self.base_case_url.format(item['caseId'], i + 1, self._get_random_str())
                        item['caseUrl'] = case_url
                        headers = self._deal_spm_headers(response.request.url)
                        yield scrapy.Request(
                            case_url,
                            headers=headers,
                            callback=self.parse_case_detail,
                            meta=dict(item=deepcopy(item)),
                            priority=6,
                        )
                redis_conn.close()

            # Pagination -- both url flavours share one error-handling path
            # (the original duplicated the whole try/except/yield per flavour).
            if page < total_page:
                page += 1
                try:
                    if flag:
                        location_code, category = self._get_page_params(response.request.url)
                        # explicit raise instead of assert: asserts vanish under `python -O`
                        if not (location_code and category):
                            raise ValueError('location_code/category missing')
                        next_page_url = self.base_page_url.format(location_code, category, page)
                    else:
                        category, city = self._get_page_params_2(response.request.url)
                        if not (category and city):
                            raise ValueError('category/city missing')
                        next_page_url = self.base_page_url_2.format(category, city, page)
                except Exception as e:
                    logger.error(
                        '{}-{}-{}-{} 第{}页 翻页请求参数获取失败  error_msg:{}'.format(item['categoryName'], item['province'],
                                                                           item['city'], item['districtName'], page,
                                                                           e))
                else:
                    yield scrapy.Request(
                        next_page_url,
                        callback=self.parse_case_li,
                        meta=dict(item=deepcopy(item), page=page, flag=flag, ),
                        priority=5,
                    )
    def parse_case_detail(self, response):
        """Parse a case detail page: prices, media and the urls of the
        notice/instruction/description/attachment/confirm sub-pages, then
        chain into ``parse_auction_notice``.

        :param response: case detail page; ``meta`` carries the item.
        """
        item = copy(response.meta['item'])
        initial_price = response.xpath("//span[text()='起 拍 价']/following-sibling::span/span/text()").extract_first()
        margin_money = response.xpath("//span[text()='保 证 金']/following-sibling::span/span/text()").extract_first()
        images_li = response.xpath("//div[@class='slide-bigpic']/img/@data-ks-lazyload").extract()
        notice_url = response.xpath("//div[@id='J_NoticeDetail']/@data-from").extract_first()
        instruction_url = response.xpath("//div[@id='J_ItemNotice']/@data-from").extract_first()
        attachment_url = response.xpath("//p[@id='J_DownLoadFirst']/@data-from").extract_first()
        description_url = response.xpath("//div[@id='J_desc']/@data-from").extract_first()
        item['initialPrice'] = self._format_price(initial_price)
        item['marginMoney'] = self._format_price(margin_money)
        item['mediaUrl'] = response.xpath("//div[@id='player']/@data-src").extract_first()
        item['imageUrls'] = [urllib.parse.urljoin(self.base_image_url, i) for i in images_li] if images_li else []
        try:
            notice_url = self._create_url(notice_url)
            instruction_url = self._create_url(instruction_url)
            attachment_url = self._create_url(attachment_url)
            # explicit raise instead of assert: asserts vanish under `python -O`
            if not (notice_url and instruction_url):
                raise ValueError('notice_url/instruction_url missing')
            description_url = 'https:' + description_url
            # the deal-confirmation endpoint only exists for finished auctions
            confirm_url = self._create_url(self.base_confirm_url)
            confirm_url = confirm_url.format(item['caseId']) if '已结束' in item['caseStatus'] else ''
        except Exception as e:
            logger.error(
                '{}-{}-{}-{}-{} 竞买公告、竞买须知、标的物介绍等url构造失败 ，error_msg:{}'.format(item['categoryName'],
                                                                              item['province'], item['city'],
                                                                              item['districtName'], item['caseTitle'],
                                                                              e))
        else:
            headers = self._deal_spm_headers(item['caseUrl'])
            yield scrapy.Request(
                notice_url,
                headers=headers,
                callback=self.parse_auction_notice,
                # urls are immutable strings -- deepcopy of them was a no-op
                meta=dict(item=deepcopy(item),
                          instruction_url=instruction_url,
                          attachment_url=attachment_url,
                          description_url=description_url,
                          confirm_url=confirm_url,
                          ),
                priority=10,
            )

    def parse_auction_notice(self, response):
        """Extract the auction-notice html (gb2312 jsonp body) into the item,
        then chain into ``parse_auction_instruction``.

        On extraction failure the raw url is stored instead of the html.
        """
        item = copy(response.meta['item'])
        instruction_url = copy(response.meta['instruction_url'])
        attachment_url = copy(response.meta['attachment_url'])
        description_url = copy(response.meta['description_url'])
        confirm_url = copy(response.meta['confirm_url'])
        try:
            ret_str = response.body.decode('gb2312', 'ignore')
            html_str = self._match_html(ret_str)
            # explicit raise instead of assert: asserts vanish under `python -O`
            if not html_str:
                raise ValueError('no html fragment in response')
        except Exception:
            logger.error(
                '{}-{}-{}-{}-{} 竞买公告html提取失败'.format(item['categoryName'], item['province'], item['city'],
                                                     item['districtName'], item['caseTitle']))
            item['auctionNotice'] = response.request.url
        else:
            item['auctionNotice'] = html_str
        finally:
            headers = self._deal_spm_headers(item['caseUrl'])
            yield scrapy.Request(
                instruction_url,
                headers=headers,
                callback=self.parse_auction_instruction,
                meta=dict(item=deepcopy(item),
                          attachment_url=attachment_url,
                          description_url=description_url,
                          confirm_url=confirm_url,
                          ),
                priority=10,
            )

    def parse_auction_instruction(self, response):
        """Extract the bidding-instructions html (gb2312 jsonp body) into the
        item, then chain into ``parse_description``.

        On extraction failure the raw url is stored instead of the html.
        """
        item = copy(response.meta['item'])
        attachment_url = copy(response.meta['attachment_url'])
        description_url = copy(response.meta['description_url'])
        confirm_url = copy(response.meta['confirm_url'])
        try:
            ret_str = response.body.decode('gb2312', 'ignore')
            html_str = self._match_html(ret_str)
            # explicit raise instead of assert: asserts vanish under `python -O`
            if not html_str:
                raise ValueError('no html fragment in response')
        except Exception:
            logger.error(
                '{}-{}-{}-{}-{} 竞买须知html提取失败'.format(item['categoryName'], item['province'], item['city'],
                                                     item['districtName'], item['caseTitle']))
            item['auctionInstruction'] = response.request.url
        else:
            item['auctionInstruction'] = html_str
        finally:
            headers = self._deal_spm_headers(item['caseUrl'])
            yield scrapy.Request(
                description_url,
                headers=headers,
                callback=self.parse_description,
                meta=dict(item=deepcopy(item),
                          attachment_url=attachment_url,
                          confirm_url=confirm_url,
                          ),
                priority=10,
            )

    def parse_description(self, response):
        """Extract the lot-description html plus any inline images, then chain
        to the attachment page, the confirm page, or yield the finished item.
        """
        item = copy(response.meta['item'])
        attachment_url = copy(response.meta['attachment_url'])
        confirm_url = copy(response.meta['confirm_url'])
        try:
            ret_str = response.body.decode('gb2312', 'ignore')
            html_str = self._match_html(ret_str)
            # explicit raise instead of assert: asserts vanish under `python -O`
            if not html_str:
                raise ValueError('no html fragment in response')
            html_obj = etree.HTML(html_str)
            imgs_url_li = html_obj.xpath("//div[@class='slide-bigpic']/img/@src")
            imgs_url_li = [urllib.parse.urljoin(self.base_image_url, i) for i in imgs_url_li] if imgs_url_li else []
        except Exception:
            logger.error(
                '{}-{}-{}-{}-{} 标的物介绍html提取失败'.format(item['categoryName'], item['province'], item['city'],
                                                      item['districtName'], item['caseTitle']))
            item['description'] = response.request.url
        else:
            item['description'] = html_str
            # description images go in front of the detail-page images
            item['imageUrls'] = imgs_url_li + item['imageUrls']
        finally:
            if attachment_url:
                headers = self._deal_spm_headers(item['caseUrl'])
                yield scrapy.Request(
                    attachment_url,
                    headers=headers,
                    callback=self.parse_attachment_info,
                    meta=dict(item=deepcopy(item),
                              confirm_url=confirm_url,
                              ),
                    priority=10,
                )
            else:
                item['attachmentInfo'] = None
                if confirm_url:
                    headers = self._deal_spm_headers(item['caseUrl'])
                    yield scrapy.Request(
                        confirm_url,
                        headers=headers,
                        callback=self.parse_purchase_confirm,
                        meta=dict(item=deepcopy(item)),
                        priority=10,
                    )
                else:
                    item['purchaseConfirm'] = None
                    item['bidRecords'] = None
                    yield item

    def parse_attachment_info(self, response):
        """Extract the attachment list from the jsonp body and attach download
        urls, then chain into the confirm page or yield the finished item.
        """
        item = copy(response.meta['item'])
        confirm_url = copy(response.meta['confirm_url'])
        try:
            ret_str = response.body.decode('gb2312', 'ignore')
            ret_li = self._match_attachment_or_confirm(ret_str)
        except Exception:
            # bare ``except:`` narrowed; on failure store the url as a marker
            logger.error(
                '{}-{}-{}-{}-{} 附件信息提取失败'.format(item['categoryName'], item['province'], item['city'],
                                                 item['districtName'], item['caseTitle']))
            item['attachmentInfo'] = response.request.url
        else:
            attachment_li = []
            if ret_li:
                for ret_dict in ret_li:
                    # enrich each attachment record with its download url
                    ret_dict['url'] = self.base_download_attach_url.format(ret_dict['id'])
                    attachment_li.append(ret_dict)
            item['attachmentInfo'] = attachment_li if attachment_li else None
        finally:
            if confirm_url:
                headers = self._deal_spm_headers(item['caseUrl'])
                yield scrapy.Request(
                    confirm_url,
                    headers=headers,
                    callback=self.parse_purchase_confirm,
                    meta=dict(item=deepcopy(item)),
                    priority=10,
                )
            else:
                item['purchaseConfirm'] = None
                item['bidRecords'] = None
                yield item

    def parse_purchase_confirm(self, response):
        """Extract the deal-confirmation dict, then request the bid records."""
        item = copy(response.meta['item'])
        try:
            # NOTE(review): sibling parsers decode as gb2312, this endpoint
            # uses the utf-8 default -- presumably intentional; confirm.
            ret_str = response.body.decode(errors='ignore')
            ret_dict = self._match_attachment_or_confirm(ret_str)
            confirm_dict = ret_dict.get('purchaseConfirm', None)
            # explicit raise instead of assert: asserts vanish under `python -O`
            if not confirm_dict:
                raise ValueError('purchaseConfirm missing')
        except Exception:
            logger.error(
                '{}-{}-{}-{}-{} 竞价成功确认书信息提取失败'.format(item['categoryName'], item['province'], item['city'],
                                                      item['districtName'], item['caseTitle']))
            item['purchaseConfirm'] = None
        else:
            item['purchaseConfirm'] = confirm_dict
        finally:
            records_page_url = self._create_url(self.base_records_page_url.format(item['caseId'], 1))
            headers = self._deal_spm_headers(item['caseUrl'])
            yield scrapy.Request(
                records_page_url,
                headers=headers,
                callback=self.parse_bid_records,
                meta=dict(item=deepcopy(item)),
                priority=10,
            )

    def parse_bid_records(self, response):
        """Extract the bid/offer records (first page only) and yield the item.

        :param response: jsonp response of get_bid_records; ``meta`` carries
            the item, whose ``bidCount`` is cross-checked against the
            endpoint's ``totalCnt``.
        """
        item = copy(response.meta['item'])
        try:
            ret_str = response.body.decode(encoding='gb2312', errors='ignore')
            ret_dict = self._match_attachment_or_confirm(ret_str)
            records_li = ret_dict.get('records', None)
            records_num = ret_dict.get('totalCnt', 0)
            # explicit raises instead of asserts: asserts vanish under
            # `python -O`, and a bare AssertionError logged an empty error_msg
            if not records_li:
                raise ValueError('records empty')
            if int(records_num) != int(item['bidCount']):
                raise ValueError('totalCnt {} != bidCount {}'.format(records_num, item['bidCount']))
        except Exception as e:
            logger.error(
                '{}-{}-{}-{}-{} 竞买/应买记录提取失败，error_msg:{}'.format(item['categoryName'], item['province'], item['city'],
                                                                 item['districtName'], item['caseTitle'], e))
            item['bidRecords'] = response.request.url
        else:
            item['bidRecords'] = records_li
        finally:
            yield item

        # TODO: pagination -- only the first page is needed for now.

    def _get_province_and_city(self):
        """
        获取需要爬取的 省份/直辖市、城市
        :return: <tuple>   ('重庆,四川',{'四川': ['成都','泸州']})
        """
        try:
            province_li = self.settings['SFPM_PROVINCE'] if self.settings['SFPM_PROVINCE'] != None else ['全国']
            assert isinstance(province_li, list)
            province_str = ','.join(province_li)
            city_dict = self.settings['SFPM_CITY'] if self.settings['SFPM_CITY'] else dict()
            assert isinstance(city_dict, dict)
            for province, city_li in city_dict.items():
                assert province in province_li and city_li and isinstance(city_li, list)
        except:
            logger.error(
                'SFPM_PROVINCE、SFPM_CITY 参数配置出错，请重新配置，参考如下：\nSFPM_PROVINCE = {}\nSFPM_CITY = {}'.format(['重庆', '四川'], {
                    '重庆': ['重庆', ], '四川': ['成都', '泸州', ], }))
            return '', dict()
        else:
            return province_str, city_dict

    @staticmethod
    def _get_random_str():
        return ''.join(
            [random.choice(string.ascii_letters) if random.randint(0, 1) else random.choice(string.digits) for _ in
             range(14)])

    @staticmethod
    def _get_category(_url):
        """Map the ``category=<id>`` query parameter of *_url* to its display
        name via ``CATEGORY_DICT``.

        :return: category name, or None when absent/unknown.
        """
        # The original pattern required a trailing '&', silently missing a
        # ``category`` that is the last query parameter; the separator is now
        # optional. Bare ``except:`` narrowed to the failures that can occur.
        regex = re.compile(r'category=(\d+)')
        try:
            ret_li = regex.findall(_url)
            return CATEGORY_DICT.get(int(ret_li[0]), None)
        except (IndexError, TypeError, ValueError):
            return None

    def _deal_url(self, _url, _spm_4):
        regex = re.compile(r'.*\?')
        _url = self.base_url + regex.sub('', _url)
        _url = _url.format(_spm_4, self._get_random_str())
        return _url

    @staticmethod
    def _get_digit(_str):
        regex = re.compile(r'\d+')
        ret_li = regex.findall(_str)
        return ret_li[0] if ret_li else None

    @staticmethod
    def _get_status(_sell_off, _status):
        """Map the raw (sellOff, status) pair to a display status via the
        two-level ``CASE_STATUS_DICT``.

        :return: status name, or None for unknown combinations.
        """
        try:
            sell_off = str(_sell_off).strip().lower()
            status = str(_status).strip().lower()
            return CASE_STATUS_DICT[sell_off][status]
        except (KeyError, TypeError):
            # bare ``except:`` narrowed: unknown key or non-dict level
            return None

    @staticmethod
    def transfer_date(stamp):
        try:
            date_obj = datetime.datetime.fromtimestamp(int(stamp) / 1000, None)
            date_str = date_obj.strftime("%Y-%m-%d %H:%M:%S")
        except:
            return
        else:
            return date_str

    def _deal_spm_headers(self, _url):
        regex_1 = re.compile(r'spm=(.*?)&')
        regex_2 = re.compile(r'spm=(.*)')
        try:
            if '&' in _url:
                ret_li = regex_1.findall(_url)[0].split('.')
                ret_str = 'spm=' + '.'.join([i if len(i) != 14 else self._get_random_str() for i in ret_li]) + '&'
                url = regex_1.sub(ret_str, _url)
            else:
                ret_li = regex_2.findall(_url)[0].split('.')
                ret_str = 'spm=' + '.'.join([i if len(i) != 14 else self._get_random_str() for i in ret_li])
                url = regex_2.sub(ret_str, _url)
            headers = dict(referer=url)
        except:
            return dict(referer=_url)
        else:
            return headers

    @staticmethod
    def _get_page_params(_url):
        regex_1 = re.compile(r'location_code=(.*?)&')
        regex_2 = re.compile(r'category=(.*?)&')
        try:
            location_code = regex_1.findall(_url)[0]
            category = regex_2.findall(_url)[0]
        except:
            return None, None
        else:
            return location_code, category

    @staticmethod
    def _get_page_params_2(_url):
        regex_1 = re.compile(r'category=(.*?)&')
        regex_2 = re.compile(r'city=(.*?)&')
        try:
            category = regex_1.findall(_url)[0]
            city = regex_2.findall(_url)[0]
        except:
            return None, None
        else:
            return category, city

    @staticmethod
    def _format_price(_str):
        regex = re.compile(r',')
        try:
            _ret = float(regex.sub('', _str))
        except:
            return
        else:
            return _ret

    def _create_url(self, _url):
        regex = re.compile(r'amp;')
        try:
            _url = regex.sub('', _url)
            _url = urllib.parse.urljoin('https://sf-item.taobao.com', _url)
            _url_li = _url.split('?')
            base_params_str = '?_ksTS={}_{}&callback=jsonp{}&'
            time_stamp = self.get_timestamp()
            random_int = self.get_random_int()
            params_str = base_params_str.format(time_stamp, random_int, random_int + 1)
            _url = _url_li[0] + params_str + _url_li[1]
        except:
            return
        else:
            return _url

    @staticmethod
    def get_timestamp():
        """Return the current moment as a timestamp via the project converter."""
        return datetime_to_timestamp(datetime.datetime.now())

    @staticmethod
    def get_random_int(_start=100, _end=1000):
        return random.randrange(_start, _end)

    @staticmethod
    def _match_html(_str):
        regex = re.compile(r'<.*>', re.S)
        try:
            ret_li = regex.findall(_str)
            ret_str = ret_li[0].replace('\\', '')
        except:
            return
        else:
            return ret_str

    @staticmethod
    def _match_attachment_or_confirm(_str):
        regex = re.compile(r'[(](.*)[)]', re.S)
        ret_li = regex.findall(_str)
        try:
            ret_obj = json.loads(ret_li[0])
        except:
            ret_obj = demjson.decode(ret_li[0])
        return ret_obj
