import scrapy
import lxml.etree as le
import csv
import pandas as pd
# Return a single (the first) xpath result.
def xpath_one(contentx, path, default=None):
    """Evaluate *path* against *contentx* and return the first match.

    Parameters:
        contentx: an lxml element/tree, or raw HTML as ``str``/``bytes``
            (raw input is parsed with ``lxml.etree.HTML`` first).
        path: XPath expression to evaluate.
        default: value returned when the query yields no results.
    """
    # isinstance is the idiomatic type check (also handles subclasses).
    if isinstance(contentx, (str, bytes)):
        contentx = le.HTML(contentx)
    rets = contentx.xpath(path)
    return rets[0] if rets else default


# Return all xpath results (optionally whitespace-stripped).
def xpath_all(contentx, path, strip=False):
    """Evaluate *path* against *contentx* and return all matches as a list.

    Parameters:
        contentx: an lxml element/tree, or raw HTML as ``str``/``bytes``
            (raw input is parsed with ``lxml.etree.HTML`` first).
        path: XPath expression to evaluate.
        strip: when True, ``str.strip`` each result (results are assumed
            to be text nodes in that case).
    """
    # isinstance is the idiomatic type check (also handles subclasses).
    if isinstance(contentx, (str, bytes)):
        contentx = le.HTML(contentx)
    rets = contentx.xpath(path)
    if strip:
        # Comprehension replaces the manual append loop.
        return [ret.strip() for ret in rets]
    return rets


# Join all xpath results into a single string.
def xpath_union(contentx, path, sep='', strip=True, default=None):
    """Evaluate *path* and join all matches with *sep* into one string.

    Parameters:
        contentx: an lxml element/tree, or raw HTML as ``str``/``bytes``.
        path: XPath expression to evaluate.
        sep: separator placed between matches.
        strip: when True, each match is whitespace-stripped before joining.
        default: returned when the query yields no results.
    """
    # No need to pre-parse str/bytes here: xpath_all already does that.
    rets = xpath_all(contentx=contentx, path=path, strip=strip)
    if rets:
        return sep.join(rets)
    return default


class P():
    """Namespace holding the XPath expressions used by the spiders."""

    # Links from a listing page to the level-2 (detail) pages.
    href2 = '//ul[@class="sellListContent"]/li/a/@href'


class S1Spider(scrapy.Spider):
    """Crawl Lianjia second-hand housing ("ershoufang") listings per city.

    Level 1 (``parse1``): paginated listing pages ``/ershoufang/pgN/``
    for each city read from the input CSV.
    Level 2 (``parse2``): each property detail page; yields an item with
    the raw HTML, the URL slug as ``filename``, and the city name.
    """

    name = 's1'

    # Input CSV: header row, then one row per city -> (city name, city URL).
    # NOTE(review): machine-specific absolute path; consider moving it to
    # scrapy settings or a command-line argument.
    csv_path = r'E:\BaiduNetdiskDownload\GAME\OCTOPATH2\hourse_spider\hourse_scrapy\cityUrl.csv'
    # Listing pages to request per city, inclusive.
    page_start = 1
    page_end = 50

    def start_requests(self):
        # Read the city list and fan out one request per listing page.
        # NOTE(review): no encoding given, so the platform default is used —
        # confirm the CSV's actual encoding (utf-8 vs gbk) before deploying.
        with open(self.csv_path, mode="r", newline='') as f:
            reader = csv.reader(f)
            for idx, row in enumerate(reader):
                if idx == 0:
                    continue  # skip the header row
                # Use the spider's logger instead of a stray print().
                self.logger.debug('city row: %s', row)
                # row[1] looks like 'https://bj.lianjia.com/...' -> 'bj'
                ct = row[1].split(r'//')[1].split('.')[0]
                city = row[0]
                for page in range(self.page_start, self.page_end + 1):
                    url = f'https://{ct}.lianjia.com/ershoufang/pg{page}/'
                    yield scrapy.Request(
                        url=url,
                        callback=self.parse1,
                        meta={
                            'city': city
                        }
                    )

    def parse1(self, response):
        """Extract detail-page links from a listing page and follow them."""
        for href in xpath_all(response.body, path=P.href2):
            yield scrapy.Request(
                url=href,
                callback=self.parse2,
                meta={
                    'href': href,
                    'city': response.meta['city']
                }
            )

    def parse2(self, response):
        """Yield the raw HTML of one detail page, keyed by its URL slug."""
        href = response.meta['href']
        filename = href.split('/')[-1]
        item = {
            'filename': filename,
            'html': response.body,
            'city': response.meta['city']
        }
        yield item

# item = {'html': response.body}
# yield item
