import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, BASE_DIR)
import re
# import requests
import scrapy
from mySpiders.items import MyspidersItem


class AnjukeSpider(scrapy.Spider):
    """Scrape new-property ("loupan") listings from sy.fang.anjuke.com.

    ``parse`` walks the paginated listing pages, collects the summary fields
    of each property card, then requests the per-property parameter page
    (``/loupan/canshu-<id>.html``); ``parse_detail`` merges those summary
    fields with the parameter tables and yields the combined item.
    """

    name = 'anjuke'
    allowed_domains = ['sy.fang.anjuke.com']
    start_urls = ['https://sy.fang.anjuke.com/']

    # Listing links look like https://sy.fang.anjuke.com/loupan/<id>.html.
    # Dots are escaped so "." cannot match an arbitrary character, and both
    # patterns are compiled once instead of on every property card.
    _LOUPAN_ID_RE = re.compile(r"https://sy\.fang\.anjuke\.com/loupan/(\d+)\.html")
    _AREA_RE = re.compile(r"建筑面积：(.*)㎡")

    @staticmethod
    def _first_text(selector, xpath, default=""):
        """Return the stripped first text node matched by *xpath*.

        Falls back to *default* when the node is absent, instead of raising
        ``AttributeError`` on ``None`` the way ``extract_first().strip()``
        does for optional markup (e.g. the on-sale / property-type badges).
        """
        text = selector.xpath(xpath).extract_first()
        return text.strip() if text is not None else default

    def parse(self, response):
        """Parse one listing page.

        Yields one detail-page ``Request`` per property card (summary fields
        travel in ``meta``), then follows the "下一页" (next page) link.
        """
        for card in response.xpath("//div[@class='key-list imglazyload']/div"):
            href = self._first_text(card, ".//a[@class='lp-name']/@href")
            id_match = self._LOUPAN_ID_RE.search(href)
            if id_match is None:
                # Not a property-card link (ad block, unexpected URL layout):
                # skip the card rather than crash the whole page.
                continue
            loupan_id = id_match.group(1)

            meta_dict = {
                "name": self._first_text(card, "./div[@class='infos']/a/span[@class='items-name']/text()"),
                # \xa0 (non-breaking space) separates district / street parts.
                "address": self._first_text(card, "./div[@class='infos']/a/span[@class='list-map']/text()").replace("\xa0", ""),
                "onsale": self._first_text(card, "./div[@class='infos']/a[@class='tags-wrap']/div/i[@class='status-icon onsale']/text()"),
                "wuyetp": self._first_text(card, "./div[@class='infos']/a[@class='tags-wrap']/div/i[@class='status-icon wuyetp']/text()"),
                "price": self._first_text(card, "./a[@class='favor-pos']/p/span/text()"),
            }

            # The last <span> under a.huxing appears to be the building-area
            # span handled separately below, so it is excluded here — same
            # slicing as the original; TODO confirm against live markup.
            huxings = card.xpath("./div[@class='infos']/a[@class='huxing']/span/text()").extract()
            meta_dict["huxing"] = ",".join(huxings[0:-1])

            building_area = self._first_text(card, "./div[@class='infos']/a[@class='huxing']//span[@class='building-area']/text()")
            area_match = self._AREA_RE.search(building_area)
            meta_dict["building_area"] = area_match.group(1) if area_match else ""

            # Last tag dropped, mirroring the original's slicing.
            tags = card.xpath("./div[@class='infos']/a[@class='tags-wrap']/div/span[@class='tag']/text()").extract()
            meta_dict["tag"] = ",".join(tags[0:-1])

            detail_url = "https://sy.fang.anjuke.com/loupan/canshu-{}.html".format(loupan_id)
            yield scrapy.Request(detail_url, callback=self.parse_detail, meta=meta_dict)

        next_url = response.xpath('//a[text()="下一页"]/@href').extract_first()
        # On the last page the "next" link is a disabled href="javascript:;".
        if next_url and next_url != 'javascript:;':
            self.logger.info("following next page: %s", next_url)
            yield scrapy.Request(next_url, callback=self.parse)

    def parse_detail(self, response):
        """Parse the parameter ("canshu") page and yield the merged item.

        The summary fields collected by ``parse`` arrive via
        ``response.meta``; the first four parameter sections of the page are
        added under their section titles, each as a list of
        ``{label: [values]}`` dicts (one dict per <ul>).
        """
        meta_dict = response.meta
        item = {
            key: meta_dict[key]
            for key in ("name", "address", "huxing", "building_area",
                        "onsale", "wuyetp", "tag", "price")
        }

        for section in response.xpath("//div[@class='can-left']/div[@class='can-item']")[:4]:
            section_name = section.xpath("./div[@class='can-head']/*/text()").extract_first()
            if section_name is None:
                # A section without a readable title would previously have
                # been stored under the key None; skip it instead.
                continue
            rows = []
            for ul in section.xpath("./div[@class='can-border']/ul"):
                row = {}
                for li in ul.xpath("./li"):
                    label = self._first_text(li, "./div[1]/text()").replace("\n", "")
                    values = []
                    # A value cell may hold direct text, child elements
                    # (links / spans), or both — collect all non-empty parts.
                    direct_text = li.xpath("./div[2]/text()").extract_first()
                    if direct_text and direct_text.strip():
                        values.append(direct_text.strip().replace("\n", ""))
                    for child in li.xpath("./div[2]/*"):
                        child_text = child.xpath("./text()").extract_first()
                        if child_text:
                            values.append(child_text.strip().replace("\n", ""))
                    row[label] = values
                rows.append(row)
            item[section_name] = rows

        # The original only printed item['name'] and left "return item"
        # commented out, discarding everything scraped; yield the item so
        # pipelines and feed exports actually receive it.
        yield item