import requests
from bs4 import BeautifulSoup
import time
import random
import pandas as pd


class ReadUrl(object):
    """Fetch Craigslist pages politely and extract posting text."""

    @staticmethod
    def read_url(url):
        """
        Fetch *url* after a short random delay and parse the HTML.

        :param url: page URL to fetch
        :return: BeautifulSoup of the page body, or None on a non-200 response
        """
        # Random pause between requests to be polite to the server and
        # reduce the chance of being rate-limited or blocked.
        sleep_time = random.randint(1, 3)
        print('sleep {} seconds'.format(sleep_time))
        time.sleep(sleep_time)
        # Rotate between two desktop User-Agents so requests look less bot-like.
        header_arr = ['Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0',
                      'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36']
        header = {
            'User-Agent': random.choice(header_arr)
        }
        req = requests.get(url, headers=header)
        if req.status_code == 200:
            # Name the parser explicitly: omitting it triggers bs4's
            # GuessedAtParserWarning and makes results depend on which
            # parsers happen to be installed.
            return BeautifulSoup(req.text, 'html.parser')

    def parse_msg(self, url):
        """
        Parse the ad body text from a single posting page.

        :param url: URL of one posting
        :return: stripped ad text, or None when the page or body is unavailable
        """
        req_soup = self.read_url(url)
        if not req_soup:
            return None
        req_info = req_soup.find('section', id='postingbody')
        if req_info is None:
            # Posting was removed/flagged or the page layout changed;
            # the original code crashed with AttributeError here.
            return None
        total_msg = req_info.get_text()
        # The print/QR-code block sits inside #postingbody and its text
        # appears at the start of get_text(); look it up once and slice
        # its length off the front of the combined text.
        qr_div = req_info.find('div', class_="print-information print-qrcode-container")
        div_msg = qr_div.get_text() if qr_div else ''

        if len(total_msg) > 1:
            msg = total_msg[len(div_msg):]
            return msg.strip()

    def read_main(self, url):
        """
        Collect ad texts linked from a search-results page.

        :param url: search-results URL for one city/channel
        :return: list of ad message strings (possibly empty)
        """
        arr = []
        html_soup = self.read_url(url)
        if html_soup:
            soup_arr = html_soup.find_all('a', class_='result-title hdrlnk')
            for num, soup in enumerate(soup_arr):
                print(num)
                # Cap at 71 ads per page (indices 0..70) to bound runtime.
                if num > 70:
                    break
                ad_url = soup.get('href')
                if ad_url:
                    msg = self.parse_msg(url=ad_url)
                    if msg:
                        arr.append(msg)

        return arr


def run(city_arr, channel='mis'):
    """
    Scrape Craigslist ads for each city and save them to a CSV.

    :param city_arr: Craigslist city subdomains, e.g. ['sfbay', 'newyork']
    :param channel: section slug appended to the search URL (default 'mis',
        i.e. missed connections)
    :return: None; writes ./data/B/ch4_city_mis_ad.csv when any ads were found
    """
    import os

    read_url = ReadUrl()

    info_arr = []
    # Track which cities actually produced data: assigning df.columns from
    # city_arr would raise ValueError whenever a city yields no ads, because
    # the frame would have fewer columns than city_arr has names.
    scraped_cities = []
    for city in city_arr:
        print(city)
        url = 'https://{}.craigslist.org/d/missed-connections/search/{}'.format(city, channel)
        city_info = read_url.read_main(url)
        if city_info:
            info_arr.append(city_info)
            scraped_cities.append(city)

    if info_arr:
        # Cities can yield different ad counts; transposing lets pandas pad
        # the shorter columns with NaN so each city becomes one CSV column.
        df = pd.DataFrame(info_arr).T
        df.columns = scraped_cities
        # Ensure the output directory exists so to_csv doesn't fail on a
        # fresh checkout.
        os.makedirs('./data/B', exist_ok=True)
        df.to_csv('./data/B/ch4_city_mis_ad.csv')


if __name__ == '__main__':
    # Guard the entry point so importing this module doesn't kick off a
    # network scrape as a side effect.
    run(city_arr=['sfbay', 'newyork'])