# coding=utf-8


import calendar
import csv
import time

from selenium import webdriver
from selenium.webdriver.common.by import By


# Launch Edge, open the Weibo login page, and pause 20 seconds so the
# operator can log in manually before scraping starts (the session cookie
# from that login is what makes the search pages below accessible).
wb = webdriver.Edge()
wb.set_window_size(1000, 800)
wb.get('https://weibo.com/login.php')

time.sleep(20)


# Search configuration.
index = "甜食"  # search keyword
sheng = 44     # Weibo region code in the `region=custom:` URL parameter -- TODO confirm which province 44 maps to
year = 2020
month = 1
day1 = 1       # first day of the scraped time window within each month
day2 = 2       # last day of the window; NOTE(review): January is only scraped
               # for days 1-2, while later months get the (almost) full month
               # via the updates at the bottom of the loop -- confirm intended
page = 1

# Build and open the initial search URL. The main loop below rebuilds this
# same URL for every page/month, so this first navigation is only a warm-up.
url = "https://s.weibo.com/weibo?q={}&region=custom:{}:1000&typeall=1&suball=1&timescope=custom:{}-{}-{}:{}-{}-{}&Refer=g&page={}".format(
    index, sheng, year, month, day1, year, month, day2, page)
wb.get(url)

# Output path (without extension); scraped rows are appended to '<path>.csv'.
outputExcel = r'D:\file\Python\weibo_data'

# Scrape every month of `year`: for each month walk up to 50 result pages,
# collect post timestamps and texts, then append them to the CSV.
while month < 13:
    time2 = []    # publish timestamps collected for the current month
    textAll = []  # post texts collected for the current month
    flag = 0      # count of "no result" pages seen this month

    # Walk all result pages for the current time window (Weibo caps at 50).
    for yeshu in range(1, 51):
        page = yeshu
        try:
            # e.g. https://s.weibo.com/weibo?q=甜食&region=custom:44:1000&typeall=1&suball=1&timescope=custom:2020-1-1:2020-1-30&Refer=g&page=2
            url = "https://s.weibo.com/weibo?q={}&region=custom:{}:1000&typeall=1&suball=1&timescope=custom:{}-{}-{}:{}-{}-{}&Refer=g&page={}".format(
                index, sheng, year, month, day1, year, month, day2, page
            )
            wb.get(url)
            wb.implicitly_wait(2)

            # "Sorry, no results" card: count it, and after 5 such pages give
            # up on this month; otherwise skip straight to the next page.
            try:
                merror = wb.find_element(By.XPATH, '//div[@class="card card-no-result s-pt20b40"]/p')
                if merror.text.startswith('抱歉'):
                    flag += 1
                if flag > 4:
                    break
                continue
            except Exception:
                pass  # no error card -> this page has results

            # Publish time: the second-to-last <a> in each card's header row.
            for el in wb.find_elements(
                    By.XPATH,
                    '//*[@id="pl_feedlist_index"]/div[2]/div[1]/div/div[1]/div[2]/div[2]/a[last()-1]'):
                time2.append(el.text)

            # Click every "展开全文" (expand full text) link first so the
            # full post body is present in the DOM.
            for el in wb.find_elements(
                    By.XPATH,
                    '//*[@id="pl_feedlist_index"]/div[2]/div[3]/div/div[1]/div[2]/p[1]/a[2]'):
                if el.text.startswith('展开全'):
                    el.click()

            # Text of posts that were expanded above.
            for el in wb.find_elements(
                    By.XPATH,
                    '//*[@id="pl_feedlist_index"]/div[2]/div[3]/div/div[1]/div[2]/p[2]'):
                textAll.append(el.text)

            # Posts that never had an expand button live in a different node.
            for el in wb.find_elements(
                    By.XPATH,
                    '//div[@class="card-feed"]/div[@class="content"]/p[@node-type="feed_list_content"]'):
                if el.text:
                    textAll.append(el.text)

            time.sleep(1.1)  # throttle between pages to avoid rate limiting
        except Exception:
            continue  # best effort: a broken page must not stop the crawl

    # Append this month's rows to the CSV. Open the file once per month (the
    # original reopened it per row) and force utf-8-sig so Chinese text never
    # hits a locale-codec UnicodeEncodeError on Windows (previously such rows
    # were silently dropped by a bare except).
    # NOTE(review): zip() pairs texts with times purely by position; if the
    # two scraping passes yield different counts, pairs can drift.
    try:
        with open(outputExcel + '.csv', 'a', newline='', encoding='utf-8-sig') as f:
            csvwriter = csv.writer(f, dialect='excel')
            for a, b in zip(textAll, time2):
                print(a)
                csvwriter.writerow([a, b])
    except OSError:
        pass  # keep crawling even if the output path is unavailable
    time.sleep(2)

    # Advance to the next month and use its real length as the window end.
    # calendar.monthrange handles the 2020 leap year correctly: February has
    # 29 days, where the original hard-coded 28.
    month += 1
    if month < 13:
        day2 = calendar.monthrange(year, month)[1]
