# -*- coding: utf-8 -*-
import csv

import scrapy

from patencent.items import PatencentItem


class TencentSpider(scrapy.Spider):
    """Crawl the fang.com city index, then scrape second-hand housing (esf)
    listings for every city and append each listing as a row to csv_test.csv.
    """

    name = 'tencent'
    allowed_domains = ['esf.fang.com']
    # Entry page: the list of cities covered by fang.com.
    start_urls = ['https://www.fang.com/SoufunFamily.htm']
    # Scheme prefix used to rebuild protocol-relative city links ("//xx.fang.com/...").
    front_url = "https:"

    def parse(self, response):
        """Extract every city link from the city-index page and schedule a
        request for that city's second-hand-housing (esf) listing page.

        Yields one ``scrapy.Request`` per city, carrying a partially filled
        ``PatencentItem`` in ``meta``.

        NOTE(review): the xpath targets class 'onCont' while earlier notes in
        this file referenced 'outCont' — confirm against the live page markup.
        """
        city_links = response.xpath("//div[@class='onCont']//tr/td//a")
        for link in city_links:
            item = PatencentItem()
            item["city_name"] = link.root.text
            # Links may be protocol-relative or use "http:"; strip the scheme,
            # re-add "https:", then switch the subdomain to the esf site
            # (e.g. "sh.fang.com" -> "sh.esf.fang.com").
            href = link.attrib["href"].replace("http:", "")
            detail_url = (self.front_url + href).replace(".fang", ".esf.fang")
            item["detail_url"] = detail_url
            yield scrapy.Request(url=detail_url, callback=self.detail_parse,
                                 meta={"item": item})

    def detail_parse(self, response):
        """Scrape every listing on one result page, append the rows to
        csv_test.csv, then follow the "next page" link if one exists.

        Fields written per row: city_name, detail_url, home_description,
        home_title, home_community_name, detail_location, total_price,
        unit_price.
        """
        item = response.meta["item"]
        listings = response.xpath("//div[@class='shop_list shop_list_4']/dl")

        # Open the CSV once per page (the original reopened it per listing);
        # newline="" stops the csv module from emitting blank rows on Windows.
        # "a" appends so rows accumulate across pages and cities.
        with open("csv_test.csv", "a", newline="", encoding="utf-8") as csvfile:
            writer = csv.writer(csvfile)
            for node in listings:
                # First ten text fragments of the description paragraph,
                # with tabs/newlines stripped and joined into one string.
                fragments = node.xpath(".//dd//p[1]//text()")[0:10].extract()
                item["home_description"] = "".join(
                    frag.replace("\t", "").replace("\n", "") for frag in fragments
                )
                item["home_title"] = node.xpath(
                    ".//dd/h4/a/span/text()").extract_first()
                item["home_community_name"] = node.xpath(
                    ".//dd//p[2]/a/text()").extract_first()
                item["detail_location"] = node.xpath(
                    ".//dd/p[2]/span/text()").extract_first()
                total_price = node.xpath(
                    ".//dd[2]/span/b/text()").extract_first()
                # NOTE(review): a missing price becomes the literal "None万",
                # matching the original behaviour — confirm this is intended.
                item["total_price"] = str(total_price) + '万'
                item["unit_price"] = node.xpath(
                    ".//dd[2]/span[2]/text()").extract_first()
                writer.writerow([
                    item["city_name"], item["detail_url"],
                    item["home_description"], item["home_title"],
                    item["home_community_name"], item["detail_location"],
                    item["total_price"], item["unit_price"],
                ])

        # Pagination: these xpaths are page-level, so compute them once
        # (the original recomputed them inside the per-listing loop, which
        # also skipped pagination entirely on a page with zero listings).
        page_links = response.xpath(
            "//div[@class='page_box']/div/p/a/@href").extract()
        first_link_text = response.xpath(
            "//div[@class='page_box']/div/p/a/text()").extract_first()
        next_url = ""
        if len(page_links) == 4:
            # Middle pages: [first, prev, next, last] — take "next".
            next_url = page_links[2]
        elif len(page_links) == 2 and first_link_text == "下一页":
            # First page: only ["next", "last"] — take "next".
            next_url = page_links[0]
        # (Two links whose first text is "首页" means we are on the last page.)

        if next_url:
            yield scrapy.Request(
                url=item["detail_url"] + next_url.replace("//", "/"),
                callback=self.detail_parse,
                meta={"item": item},
            )




