# -*- coding: utf-8 -*-

import scrapy

import re
import logging
import pandas as pd
import datetime
from copy import copy, deepcopy
from HifoEsf.utils.anjuke_cities import ANJUKE_CITY_LIST
from scrapy_pyppeteer.request import PyppeteerRequest
from HifoEsf.items import CommunityItem, CaseItem

logger = logging.getLogger(__name__)


class AnjukeSpider(scrapy.Spider):
    """
    Anjuke (anjuke.com) second-hand housing case spider.

    Drills down city -> district -> business circle ("shangquan") ->
    listing pages, yielding one :class:`CaseItem` per listing row.
    Target cities come from the ``crawl``/``filter`` spider arguments or
    the ``CRAWL_CITIES``/``FILTER_CITIES`` settings (see
    :meth:`get_crawl_or_filter_cities`).
    """
    name = 'anjuke'
    allowed_domains = ['anjuke.com']
    # start_urls = ['https://www.anjuke.com/sy-city.html']
    # IP-echo page used only to kick off `parse`; the response body is not parsed.
    start_urls = ['http://icanhazip.com']

    custom_settings = {
        'CONCURRENT_REQUESTS': 2,
        'DEFAULT_REQUEST_HEADERS': {
            'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
            'accept-encoding': 'gzip, deflate, br',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        },
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
            'HifoEsf.middlewares.CustomRetryMiddleware': 500,
            'HifoEsf.middlewares.UserAgentMiddleware': 544,
            'HifoEsf.middlewares.OuterNetProxyMiddleware': 545,  # use proxies from redis; requires RedisConnPipeline to be enabled as well
            # 'HifoEsf.middlewares.AnjukeCookiesMiddleware': 546,
            'scrapy_pyppeteer.downloadermiddlewares.PyppeteerMiddleware': 566,
        },
        'ITEM_PIPELINES': {
            'HifoEsf.pipelines.RedisConnPipeline': 299,  # use proxies from redis; requires XXXProxyMiddleware to be enabled as well
            'HifoEsf.pipelines.PcdPipeline': 300,  # resolves province/city/district ids
            'HifoEsf.pipelines.AnjukePipeline': 301,
            'HifoEsf.pipelines.MongoClientPipeline': 399,
        },
        'GERAPY_PYPPETEER_HEADLESS': True,
        'GERAPY_ENABLE_REQUEST_INTERCEPTION': True,
        'GERAPY_PYPPETEER_DEVTOOLS': False,
        'GERAPY_PYPPETEER_DUMPIO': True,
        'GERAPY_PYPPETEER_DOWNLOAD_TIMEOUT': 15,
        'RETRY_TIMES': 20,
        # 'COOKIES_ENABLED': False,  # do not send cookies
        'CASE_ID_FILTER': True,  # whether to deduplicate cases by case id
    }

    def __init__(self, *args, crawl=None, filter=None, **kwargs):
        # `crawl`/`filter` arrive via `scrapy crawl anjuke -a crawl=重庆，北京`;
        # the `filter` parameter name deliberately mirrors the CLI argument
        # even though it shadows the builtin.
        self.CRAWL_CITIES = crawl
        self.FILTER_CITIES = filter
        super().__init__(*args, **kwargs)

    def parse(self, response, **kwargs):
        """
        Entry point: validate configuration, resolve the city list and yield
        one second-hand-listing request per city.

        :param response: reply to ``start_urls`` (an IP-echo page); it only
                         triggers this callback and is not parsed.
        """
        try:
            # Dedup readiness check. NOTE(review): `filter_flag` is not
            # defined anywhere in this file — presumably set by a pipeline or
            # middleware when the redis case-id set is ready; confirm.
            assert self.filter_flag != False, f'>>>>{self.name}:案例id去重配置失败<<<<'
            # parameter check: which cities, and crawl-list (1) vs filter-list (2)
            cities_li, type = self.get_crawl_or_filter_cities()
        except AssertionError as e:
            # dedup check failed: shut the spider down
            self.crawler.engine.close_spider(self, e)
        except Exception as e:
            logger.error(e)
            # parameter check failed: shut the spider down
            self.crawler.engine.close_spider(self, '参数错误')
        else:
            # checks passed: pre-process the city list
            if cities_li:
                cities_df = pd.DataFrame(ANJUKE_CITY_LIST)
                # crawl list: keep only the listed cities
                if type == 1:
                    # "全国" (nationwide) means crawl every known city
                    if '全国' in cities_li:
                        crawl_city_li = cities_df.to_dict(orient='records')
                    else:
                        crawl_city_li = cities_df.loc[cities_df.city_name.isin(cities_li)].to_dict(orient='records')
                # filter list: drop the listed cities
                else:
                    # filtering out "全国" would leave nothing to crawl — abort
                    if '全国' in cities_li:
                        crawl_city_li = list()
                        self.crawler.engine.close_spider(self, '过滤列表包含【全国】')
                    # nationwide not filtered: keep every city not listed
                    else:
                        crawl_city_li = cities_df.loc[~cities_df.city_name.isin(cities_li)].to_dict(orient='records')
                # build the per-city second-hand listing requests
                for esf_request in self.create_city_request(crawl_city_li):
                    # todo add request filtering conditions
                    yield esf_request
                del cities_df
            else:
                logger.error('安居客新房城市url列表匹配为空')
                self.crawler.engine.close_spider(self, '安居客新房城市url列表匹配为空')

    def create_city_request(self, crawl_city_li):
        """
        Build the first listing-page request for every city to crawl.

        :param crawl_city_li: list of city dicts carrying url-construction info
        :return: generator of :class:`PyppeteerRequest` (nothing is yielded
                 and the spider is closed when the list is empty)
        """
        try:
            assert len(crawl_city_li)
        except:
            self.crawler.engine.close_spider(self, '安居客二手房城市爬取列表为空')
        else:
            for crawl_city_dict in crawl_city_li:
                # normalize: derive the city's second-hand listing url template
                crawl_city_dict = self.format_crawl_city_dict(crawl_city_dict)
                if crawl_city_dict:
                    esf_url = crawl_city_dict['esf_url']
                    # fetch the district list from the first listing page;
                    # page 1 carries no `pN/` path segment, so this is always ''
                    page_num = 1
                    page_param = f'p{page_num}/' if page_num > 1 else ''
                    yield PyppeteerRequest(
                        esf_url.format(page_param),
                        callback=self.parse_district_list,
                        pretend=True,
                        wait_for='section.list-left>section.list h3.property-content-title-name',
                        meta=dict(crawl_city_dict=deepcopy(crawl_city_dict), ),
                        ignore_resource_types=['image', 'media'],
                        dont_filter=True,
                    )

    def parse_district_list(self, response):
        """
        Extract the district links from a city listing page and request the
        business-circle (shangquan) list of each district.

        :param response: city second-hand listing page
        :return: generator of :class:`PyppeteerRequest`
        """
        crawl_city_dict = copy(response.meta['crawl_city_dict'])
        # district anchors; position()>1 skips the leading "全部" (all) entry
        a_obj_li = response.xpath("//ul[@class='region region-line2']/li[position()>1]/a")
        if len(a_obj_li):
            for a_obj in a_obj_li:
                district_dict = dict()
                district_dict['provinceName'] = crawl_city_dict['province_name']
                district_dict['cityName'] = crawl_city_dict['city_name']
                district_dict['cityUrl'] = crawl_city_dict['city_url']
                district_dict['districtName'] = a_obj.xpath("./text()").extract_first()
                district_dict['districtUrl'] = a_obj.xpath("./@href").extract_first()
                # skip incomplete entries and "周边" (surrounding-area) pseudo-districts
                if district_dict['districtUrl'] and district_dict['districtName'] and '周边' not in district_dict[
                    'districtName']:
                    # fetch the district's business-circle list
                    yield PyppeteerRequest(
                        district_dict['districtUrl'],
                        callback=self.parse_shangquan_list,
                        pretend=True,
                        wait_for='section.list-left>section.list h3.property-content-title-name',
                        meta=dict(district_dict=deepcopy(district_dict), ),
                        ignore_resource_types=['image', 'media'],
                        dont_filter=True,
                    )
        else:
            logger.error('{}-{} 行政区列表获取为空'.format(crawl_city_dict['province_name'], crawl_city_dict['city_name']))

    def parse_shangquan_list(self, response):
        """
        Extract the business-circle (shangquan) links from a district page and
        request the first case-listing page of each circle.

        :param response: district second-hand listing page
        :return: generator of :class:`PyppeteerRequest`
        """
        district_dict = copy(response.meta['district_dict'])
        # shangquan anchors; position()>1 skips the leading "全部" (all) entry
        a_obj_li = response.xpath("//ul[@class='region region-line3']/li[position()>1]/a[@title]")
        if len(a_obj_li):
            district_dict['shangQuan'] = list()
            for a_obj in a_obj_li:
                shangquan_dict = dict()
                shangquan_dict['provinceName'] = district_dict['provinceName']
                shangquan_dict['cityName'] = district_dict['cityName']
                shangquan_dict['districtName'] = district_dict['districtName']
                shangquan_dict['shangQuanName'] = self.deal_space_and_char(a_obj.xpath("./text()").extract_first())
                shangquan_dict['shangQuanUrl'] = a_obj.xpath("./@href").extract_first()
                district_dict['shangQuan'].append(shangquan_dict)

                # request the first case-listing page (url rewritten to sort by area)
                yield PyppeteerRequest(
                    self.deal_case_list_url(shangquan_dict['shangQuanUrl']),
                    callback=self.parse_case_list,
                    pretend=True,
                    wait_for='section.list-left>section.list h3.property-content-title-name',
                    meta=dict(shangquan_dict=deepcopy(shangquan_dict), page_num=1, ),
                    ignore_resource_types=['image', 'media'],
                    dont_filter=True,
                )
            # todo persist the district info
        else:
            logger.error(
                '{}-{}-{} 商圈列表获取为空'.format(district_dict['provinceName'], district_dict['cityName'],
                                           district_dict['districtName']))

    def parse_case_list(self, response):
        """
        Parse one page of second-hand listing cases: yield a ``CaseItem`` per
        valid row, then follow the "next page" link.

        :param response: shangquan case-listing page
        :return: generator of ``CaseItem`` and follow-up ``PyppeteerRequest``
        """
        shangquan_dict = copy(response.meta['shangquan_dict'])
        page_num = copy(response.meta['page_num'])
        # one <div> per case row on the current page
        div_obj_li = response.xpath("//section[@class='sort-line']/following-sibling::section[1]/div")
        if len(div_obj_li):
            # posting dates come from the embedded JS state, one per row
            post_date_li = self.get_case_date_li(response.body.decode())
            # rows and dates must align index-by-index, otherwise skip the page
            if len(div_obj_li) == len(post_date_li):
                fitment_name_li = self.get_case_fitment_li(response.body.decode())
                for index, div_obj in enumerate(div_obj_li):
                    item_case = CaseItem()
                    item_case['provinceName'] = shangquan_dict['provinceName']  # province
                    item_case['cityName'] = shangquan_dict['cityName']  # city
                    item_case['districtName'] = shangquan_dict['districtName']  # district
                    item_case['shangQuan'] = shangquan_dict['shangQuanName']  # business circle
                    item_case['name'] = div_obj.xpath(
                        ".//p[@class='property-content-info-comm-name']/text()").extract_first()  # community name
                    address = div_obj.xpath(".//p[@class='property-content-info-comm-address']/span/text()").extract()
                    item_case['address'] = self.deal_address(address)  # community address
                    # drop ad rows and rows whose address does not contain the expected shangquan
                    if item_case['name'] and item_case['address'] and (item_case['shangQuan'] in item_case['address']):
                        item_case['title'] = div_obj.xpath(
                            ".//h3[@class='property-content-title-name']/@title").extract_first()  # title
                        item_case['dataUrl'] = div_obj.xpath("./a/@href").extract_first()  # case url
                        item_case['caseId'] = self.get_case_id(item_case['dataUrl'])  # case id
                        item_case['bedroom'] = div_obj.xpath(
                            ".//p/span[contains(text(),'室')]/preceding-sibling::span[1]/text()").extract_first()  # bedrooms
                        item_case['livingroom'] = div_obj.xpath(
                            ".//p/span[contains(text(),'厅')]/preceding-sibling::span[1]/text()").extract_first()  # living rooms
                        item_case['bathroom'] = div_obj.xpath(
                            ".//p/span[contains(text(),'卫')]/preceding-sibling::span[1]/text()").extract_first()  # bathrooms
                        room_type = div_obj.xpath(".//div[@class='property-content-info']/p/span/text()").extract()
                        item_case['roomType'] = self.get_room_type(room_type)  # layout
                        cont_info = div_obj.xpath(
                            ".//div[@class='property-content-info']/p[not(span)]/text()").extract()
                        cont_dict = self.get_room_cont_info(cont_info)
                        item_case['buildArea'] = cont_dict.get('buildArea', None)  # build area
                        item_case['directionType'] = cont_dict.get('directionType', None)  # facing direction
                        item_case['floor'] = cont_dict.get('floor', None)  # floor band
                        item_case['totalFloor'] = cont_dict.get('totalFloor', None)  # total floors
                        item_case['buildYear'] = cont_dict.get('buildYear', None)  # build year
                        item_case['totalPrice'] = div_obj.xpath(
                            ".//p[@class='property-price-total']//text()").extract()  # total price
                        item_case['price'] = div_obj.xpath(
                            ".//p[@class='property-price-average']/text()").extract_first()  # unit price
                        item_case['tag'] = div_obj.xpath(
                            ".//span[@class='property-content-info-tag']/text()").extract()  # tags
                        item_case['listingDate'] = post_date_li[index]  # listing date
                        item_case['decorationType'] = fitment_name_li[index] if fitment_name_li else None  # decoration
                        yield item_case
                        # todo detail requests disabled: fewer proxies made bans worse   2023/06/02
                        # dedup by caseId before requesting the detail page
                        # if (self.settings['CASE_ID_FILTER'] and self.redis_conn.sadd('anjuke_esf_case_id',
                        #                                                              item_case['caseId'])) or (
                        #         not self.settings['CASE_ID_FILTER']):
                        #     # 构造请求，获取二手房房源（案例）详情
                        #     yield PyppeteerRequest(
                        #         item_case['dataUrl'],
                        #         pretend=True,
                        #         callback=self.parse_case_detail,
                        #         meta=dict(item_case=deepcopy(item_case), ),
                        #         wait_for='div.props-right>div.maininfo>div.maininfo-price',
                        #         ignore_resource_types=['image', 'media'],
                        #         dont_filter=False,
                        #     )
            else:
                logger.error('{}-{}-{}-{}-第{}页 案例列表数量与案例日期数量不一致'.format(shangquan_dict['provinceName'],
                                                                        shangquan_dict['cityName'],
                                                                        shangquan_dict['districtName'],
                                                                        shangquan_dict['shangQuanName'], page_num))
        else:
            logger.warning('{}-{}-{}-{}-第{}页 案例列表为空'.format(shangquan_dict['provinceName'], shangquan_dict['cityName'],
                                                            shangquan_dict['districtName'],
                                                            shangquan_dict['shangQuanName'], page_num))

        # pagination: follow the "next" link with the same meta
        next_page_url = response.xpath("//section//a[@class='next next-active']/@href").extract_first()
        next_page_num = page_num + 1
        if next_page_url:
            yield PyppeteerRequest(
                next_page_url,
                callback=self.parse_case_list,
                pretend=True,
                wait_for='section.list-left>section.list h3.property-content-title-name',
                meta=dict(shangquan_dict=deepcopy(shangquan_dict), page_num=deepcopy(next_page_num), ),
                ignore_resource_types=['image', 'media'],
                dont_filter=True,
            )
        else:
            logger.warning(
                '{}-{}-{}-{}-第{}页 案例列表url为空'.format(shangquan_dict['provinceName'], shangquan_dict['cityName'],
                                                    shangquan_dict['districtName'],
                                                    shangquan_dict['shangQuanName'], next_page_num))

    def parse_case_detail(self, response):
        """
        Enrich a ``CaseItem`` with fields from the case detail page.

        NOTE(review): currently unreachable — the request that targets this
        callback is commented out in ``parse_case_list``.

        :param response: case detail page; ``meta['item_case']`` carries the
                         partially-filled item from the listing page
        :return: generator yielding the completed ``CaseItem``
        """
        # start from the item built on the listing page
        item_case = copy(response.meta['item_case'])
        item_case['decorationType'] = response.xpath(
            "//div[contains(@class,'maininfo-model-item-2')]/div[@class='maininfo-model-weak']/text()").extract_first()  # decoration
        tag = response.xpath("//div[@class='maininfo-tags']/span/text()").extract()
        item_case['tag'] = self.deal_tag(item_case['tag'], tag)  # merged list-page + detail-page tags
        item_case['property'] = response.xpath(
            "//span[contains(text(),'产权性质')]/following-sibling::span[1]/text()").extract_first()  # ownership type
        item_case['type'] = response.xpath(
            "//span[contains(text(),'物业类型')]/following-sibling::span[1]/text()").extract_first()  # property type
        item_case['propertyYears'] = response.xpath(
            "//span[contains(text(),'产权年限')]/following-sibling::span[1]/text()").extract_first()  # ownership term (original comment said "property type" — copy-paste slip)
        year = response.xpath("//span[contains(text(),'房本年限')]/following-sibling::span[1]/text()").extract_first()
        item_case['isFiveYear'], item_case['isTwoYear'] = self.deal_two_or_five(year)  # over five years / over two years
        resp_cont = response.body.decode()
        item_case['location'] = self.get_coord(resp_cont)  # coordinates
        item_case['listingDate'] = self.get_listing_date(resp_cont)  # listing date
        yield item_case

    def get_crawl_or_filter_cities(self) -> tuple:
        """
        Resolve the configured city parameters and their kind.

        Spider arguments take precedence over project settings; with nothing
        configured, default to crawling nationwide.
        :return: e.g. (['重庆', '成都', '武汉'], 1) — 1 marks a crawl list,
                 2 marks a filter list
        """
        candidates = (
            (getattr(self, 'CRAWL_CITIES', None), 1),    # -a crawl=...
            (getattr(self, 'FILTER_CITIES', None), 2),   # -a filter=...
            (self.settings.get('CRAWL_CITIES', None), 1),
            (self.settings.get('FILTER_CITIES', None), 2),
        )
        # first non-empty source wins, in priority order
        for cities, list_kind in candidates:
            if cities:
                return self.check_params(cities), list_kind
        return ['全国'], 1

    @staticmethod
    def format_crawl_city_dict(crawl_city_dict):
        """
        Normalize a city dict by deriving its second-hand listing url template.

        :param crawl_city_dict: dict expected to carry a non-empty 'city_url'
        :return: the same dict with 'esf_url' added (a `.format()` template
                 taking the page path segment), or None when 'city_url' is
                 missing or empty
        """
        try:
            base_url = crawl_city_dict['city_url']
        except Exception:
            return None
        if not base_url:
            return None
        crawl_city_dict['esf_url'] = base_url + '/sale/{}'
        return crawl_city_dict

    def check_params(self, params) -> list:
        """
        Validate and normalize the configured city parameters.

        Accepts either a delimited string ("重庆，北京") or a list of names;
        every entry must consist purely of Chinese characters. On invalid
        input the spider is closed and None is returned implicitly.

        :param params: str (separators ，、；; or ,) or list of city names
        :return: e.g. ['重庆', '成都', '武汉']
        """
        regex_sep = re.compile('，|、|；|;')  # accepted separators, normalized to ','
        regex_cn = re.compile(r'^[\u4E00-\u9FFF]+$')  # pure-Chinese city name
        try:
            # normalize both input shapes to a list, remembering the
            # shape-specific error message (the two branches previously
            # duplicated the whole validation block)
            if isinstance(params, str):
                error_msg = '参数输入错误，请重新输入，例如： scrapy crawl anjuke -a crawl=重庆，北京'
                params = regex_sep.sub(',', params).split(',')
            elif isinstance(params, list):
                error_msg = '参数配置出错，请重新配置，例如： CRAWL_CITIES = ["重庆"，"北京"]'
            else:
                raise AssertionError('参数类型不支持')
            try:
                ret_li = [param.strip() for param in params if
                          isinstance(param, str) and param.strip() and regex_cn.findall(param.strip())]
                # every entry must survive validation, and at least one must exist
                assert ret_li and len(ret_li) == len(params)
            except Exception:
                logger.error(error_msg)
                self.crawler.engine.close_spider(self, '参数错误')
            else:
                return ret_li
        except Exception as e:
            # unsupported type (or an unexpected failure): close the spider
            logger.error(e)
            self.crawler.engine.close_spider(self, '参数错误')

    @staticmethod
    def get_total_project_num(total_project):
        """
        Extract the community/project count from a text snippet.

        :param total_project: text containing the count as the first digit run
        :return: positive int, or None when absent, zero or unparseable
        """
        try:
            first_digits = re.findall(r'\d+', total_project)[0]
            count = int(first_digits)
        except Exception:
            return None
        # a zero count is treated the same as "not found"
        return count if count else None

    @staticmethod
    def deal_space_and_char(string: str):
        """
        Collapse all whitespace (spaces, newlines, tabs) out of a string.

        :param string: raw text, possibly None
        :return: the compacted string, or None when empty or not a string
        """
        try:
            compact = ''.join(string.split())
        except Exception:
            return None
        # empty result maps to None, matching the original assert-based flow
        return compact or None

    @staticmethod
    def get_room_type(room_type: list):
        """
        Join the layout fragments (e.g. ['3', '室', '2', '厅']) into one string.

        :param room_type: list of text fragments from the listing row
        :return: concatenated layout string, or None when nothing usable
        """
        try:
            joined = ''.join(part for part in room_type if part and part.strip())
        except Exception:
            return None
        return joined or None

    def get_room_cont_info(self, cont_info: list):
        """
        Extract per-case fields (build area, direction, floor, total floor,
        build year) from the free-text fragments of a listing row.

        :param cont_info: list of text fragments (xpath ``extract()`` output);
                          the original ``str`` annotation was wrong — the value
                          is iterated element-wise
        :return: dict with whichever of 'buildArea'/'directionType'/'floor'/
                 'totalFloor'/'buildYear' could be recognized (possibly empty)
        """
        ret_dict = dict()
        try:
            # collapse whitespace inside every fragment
            ret_li = [''.join(i.split()) for i in cont_info]
            # classify each fragment by its marker character
            for ret in ret_li:
                if '㎡' in ret:
                    ret_dict['buildArea'] = ret
                elif len(re.findall(r'[东南西北]', ret)):
                    ret_dict['directionType'] = ret
                elif '层' in ret:
                    ret_dict['floor'], ret_dict['totalFloor'] = self.deal_floor(ret)
                elif '年' in ret:
                    ret_dict['buildYear'] = ret
        except Exception as e:
            # best-effort: keep whatever was extracted before the failure
            logger.error(e)
            return ret_dict
        else:
            return ret_dict

    @staticmethod
    def get_case_id(case_url: str):
        """
        Extract the case id from a detail url: the path segment following
        'view/', with any query string stripped first.

        :param case_url: detail-page url, possibly None
        :return: case id string, or None when it cannot be located
        """
        try:
            path = case_url.split('?')[0]
            case_id = path.split('view/')[1]
        except Exception:
            return None
        return case_id or None

    @staticmethod
    def deal_floor(floor: str):
        """
        Split a floor description like '低层(共30层)' into its parts.

        :param floor: raw floor fragment
        :return: (floor, total_floor) — the untouched input plus the text
                 between '共' and '层', or None when that pattern is absent
        """
        match = None
        try:
            match = re.search(r'共(.*?)层', floor)
        except Exception:
            pass
        return floor, (match.group(1) if match else None)

    @staticmethod
    def deal_address(address: list):
        """
        Join the address fragments with '-', stripping each fragment.

        :param address: list of text fragments, possibly None
        :return: joined address ('' for an empty list), or None on error
        """
        try:
            usable = (piece.strip() for piece in address if piece and piece.strip())
            joined = '-'.join(usable)
        except Exception:
            return None
        return joined

    @staticmethod
    def deal_case_list_url(url: str):
        """
        Inject the 'o2/' (sort-by-area) path segment into a listing url.

        :param url: shangquan listing url, with or without a query string
        :return: the url sorted by area; the input unchanged when it has an
                 unexpected shape or cannot be processed
        """
        try:
            pieces = url.split('?')
            # no query string: append the sort segment to the path
            if len(pieces) == 1:
                return pieces[0] + 'o2/'
            # single query string: insert the sort segment before it
            if len(pieces) == 2:
                return '{}o2/?{}'.format(pieces[0], pieces[1])
            # multiple '?' — leave the url untouched
            return url
        except Exception:
            return url

    @staticmethod
    def deal_tag(tag1, tag2):
        """
        Merge two tag collections; falsy inputs count as empty lists.

        :param tag1: tags from the listing page (or None)
        :param tag2: tags from the detail page (or None)
        :return: tag1 + tag2, or the normalized first operand when the two
                 cannot be concatenated (e.g. mismatched types)
        """
        left = tag1 if tag1 else list()
        right = tag2 if tag2 else list()
        try:
            return left + right
        except Exception:
            return left

    @staticmethod
    def deal_two_or_five(year):
        """
        Map the deed-age text ('房本年限') to ownership-duration flags.

        :param year: text such as '满五年' / '满二年', possibly None
        :return: (isFiveYear, isTwoYear); five years implies two years,
                 unknown or empty input yields (False, False)
        """
        try:
            if not year:
                return False, False
            if '五' in year:
                return True, True
            if '二' in year:
                return False, True
            return False, False
        except Exception:
            # non-string input etc. — treat as unknown
            return False, False

    @staticmethod
    def get_coord(cont: str):
        """
        Extract the coordinate pair embedded as `coord="x,y"` in the
        detail-page source.

        :param cont: decoded html of the detail page
        :return: tuple of two floats, or None unless exactly one match exists
        """
        coord_re = re.compile(r'coord=(\d+\.\d+,\d+\.\d+?)"')
        try:
            matches = coord_re.findall(cont)
            # require an unambiguous single occurrence
            if len(matches) != 1:
                return None
            return tuple(float(piece) for piece in matches[0].split(','))
        except Exception:
            return None

    def get_listing_date(self, cont: str):
        """
        Extract the listing date (embedded `post_date` timestamp) from the
        detail-page source.

        :param cont: decoded html of the detail page
        :return: 'YYYY-MM-DD' string, or None when absent or unparseable
        """
        try:
            match = re.search(r'post_date:"(\d+)",', cont)
            return self.transfer_date(match.group(1))
        except Exception:
            return None

    @staticmethod
    def transfer_date(stamp):
        """
        Convert a unix timestamp (seconds or milliseconds) to a date string.

        More than 10 digits is treated as milliseconds (second-precision
        timestamps stay 10 digits until the year 2286).
        :param stamp: int or digit string
        :return: 'YYYY-MM-DD' in local time
        """
        seconds = int(stamp)
        if len(str(stamp)) > 10:
            seconds = seconds / 1000
        # tz=None -> naive local-time conversion, as in the original
        return datetime.datetime.fromtimestamp(seconds, None).strftime("%Y-%m-%d")

    def get_case_date_li(self, cont: str):
        """
        Extract one posting date per case row from the list-page source.

        :param cont: decoded html of the listing page
        :return: list of 'YYYY-MM-DD' / None entries (one per case), or None
                 when the embedded case list cannot be located
        """
        try:
            embedded_list = re.findall(r',list:\[{.*?}\],categories', cont)[0]
            raw_dates = re.findall(r'post_date:(.*?),', embedded_list)
        except Exception:
            return None
        formatted = list()
        for raw_date in raw_dates:
            try:
                # scale to milliseconds so transfer_date's >10-digit branch
                # converts the value back (matches the original x1000)
                millis = int(re.findall(r'\d+', raw_date)[0]) * 1000
                formatted.append(self.transfer_date(millis))
            except Exception:
                # keep positional alignment with the case rows
                formatted.append(None)
        return formatted

    def get_case_fitment_li(self, cont: str):
        """
        Extract one decoration ("fitment") name per case row from the
        list-page source.

        The page embeds its state as an obfuscated NUXT payload
        (``window.__NUXT__=(function(a,b,...){...}(v1,v2,...))``); fitment
        values may be either literal strings or single-letter variable names
        that must be resolved against the function's argument list.

        :param cont: decoded html of the listing page
        :return: list of fitment names (entries may be None when a variable
                 cannot be resolved); empty list on any failure
        """
        try:
            case_list_str = re.findall(r',list:\[{.*?}\],categories', cont)[0]
            fitment_name_li = re.findall(r'fitment_name:(.*?),', case_list_str)
            # parameter names of the NUXT IIFE
            params_name_li = re.findall(r'window.__NUXT__=\(function\((.*?)\)', cont)[0].split(',')
            # argument values of the IIFE; the substitutions below blank out
            # values whose embedded commas would break the naive split(',')
            params_value_str = re.findall(r'}\((.*?)\)\);', cont)[0]
            params_value_str = re.sub(r',"(\d+\|\d+\|.*?)",', ',"",', params_value_str)
            params_value_str = re.sub(r',[\u4e00-\u9fa5]+', '', params_value_str)
            params_value_str = re.sub(r'"[\u4e00-\u9fa5][\u4e00-\u9fa50-9\-,\(\)]{5,}"', '""', params_value_str)
            params_value_li = [i.replace('"', '') for i in params_value_str.split(',')]
            # names and values must align positionally or the mapping is wrong
            assert len(params_name_li) == len(params_value_li)
            # variable-name -> value lookup table
            params_dict = dict(zip(params_name_li, params_value_li))
            fitment_name_li_fmt = list()
            for fitment_name in fitment_name_li:
                # literal fitment strings contain '装' or '毛坯'; anything else
                # is a variable name to resolve through the lookup table
                if all(['装' not in fitment_name, '毛坯' not in fitment_name]):
                    fitment_name = params_dict.get(fitment_name, None)
                else:
                    fitment_name = fitment_name.replace('"', '')
                fitment_name_li_fmt.append(fitment_name)
        except:
            return list()
        else:
            return fitment_name_li_fmt
