# -*- coding: utf-8 -*-
#from scrapy_yzd.items import jdItem
#import json
import time
import pandas as pd
import scrapy
#from scrapy.http import Request

# Module-level accumulators shared between the spider callbacks and
# save_csv(); they grow for the lifetime of the crawl process.
ads_data = []  # joined message text for each ad page that had content
phone_data = []  # phone numbers per ad — NOTE(review): never appended to below (extraction is commented out); verify intent
res_urls = []  # URL of each page that contributed an entry to ads_data


def save_csv():
    """Write the accumulated scrape results to a dated CSV file.

    Builds a DataFrame from the module-level accumulator lists
    (``res_urls``, ``ads_data``, ``phone_data``) and writes it to
    ``data/localbtcoin<YYYYMMDD>.csv``, overwriting any earlier dump
    from the same day.

    The lists can have unequal lengths (phone extraction is currently
    disabled, so ``phone_data`` stays empty while the others grow), so
    each one is wrapped in a Series — pandas pads the shorter columns
    with NaN instead of raising ``ValueError``.
    """
    import os
    # Ensure the output directory exists; to_csv does not create it.
    os.makedirs('data', exist_ok=True)
    table = pd.DataFrame({
        'urls': pd.Series(res_urls),
        'ads': pd.Series(ads_data),
        'phone': pd.Series(phone_data),
    })
    date = time.strftime('%Y%m%d', time.localtime(time.time()))
    # utf_8_sig prepends a BOM so Excel renders the CJK text correctly.
    table.to_csv(r'data/localbtcoin%s.csv' % (date), encoding="utf_8_sig")
    return


class localbtcoinSpider(scrapy.Spider):
    """Crawl localbitcoins.com ad pages and harvest their message text.

    Enumerates candidate ad ids, fetches each ad page, extracts the
    ``<p class="messagetext">`` contents, and appends hits to the
    module-level accumulator lists before dumping them via save_csv().
    """

    name = "localbtcoin"
    allowed_domains = ["localbitcoins.com"]
    start_urls = [
        "https://localbitcoins.com/?ch=2vvv"
    ]
    # Throttle via Scrapy's scheduler instead of time.sleep() in parse():
    # sleep() blocks the whole Twisted reactor, stalling every concurrent
    # request; DOWNLOAD_DELAY delays only the downloads themselves.
    custom_settings = {'DOWNLOAD_DELAY': 1}

    def parse(self, response):
        """Fan out one request per candidate ad id (100000..149999).

        Uses a generator over the id range rather than materializing a
        50 000-element URL list up front.
        """
        for ad_id in range(100000, 150000):
            yield scrapy.Request('https://localbitcoins.com/ad/%d' % ad_id,
                                 callback=self.parse_page)

    def parse_page(self, response):
        """Extract message text from one ad page and persist results."""
        # Text nodes of every message paragraph on the page.
        content = response.selector.xpath('//p[@class="messagetext"]/text()').extract()

        # Join the extracted fragments into a single string.
        detail_ads = ','.join(content)

        if detail_ads:
            print("ads url " + response.url)
            print(response)

            ads_data.append(detail_ads)
            # Phone extraction is disabled; append a placeholder so the
            # three accumulator lists stay the same length for save_csv().
            #myphone = get_phone(detail_ads)
            phone_data.append('')
            res_urls.append(response.url)
            save_csv()
        else:
            print("ads is null " + response.url)










