from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from fake_useragent import UserAgent
import time
import json
from urllib import request
import datetime
import os
import csv
from lxml import etree
import pyodbc
import hashlib


def md5Encode(str):
    """Return the hexadecimal MD5 digest of *str* (UTF-8 encoded).

    NOTE: the parameter shadows the builtin ``str``; the name is kept
    for call-site compatibility.
    """
    return hashlib.md5(str.encode('utf-8')).hexdigest()


def optimize_str(str):
    """Normalize scraped post text: trim surrounding whitespace, drop line
    breaks and literal ``</br>`` tags, and swap ASCII commas for full-width
    ones so the text stays CSV-safe.
    """
    cleaned = str.strip()
    for junk in ('\n', '\r', '</br>'):
        cleaned = cleaned.replace(junk, '')
    return cleaned.replace(',', '，')


def toCSV(item):
    """Append *item* (a flat dict) as one row to today's CSV under ``data/``.

    Columns are the item's keys in sorted order, so rows stay aligned as
    long as every item shares the same key set. A header row is written
    only when the file is created.
    """
    updatetime = datetime.datetime.now().strftime('%Y%m%d')
    filename = 'data/%s.csv' % updatetime
    # Ensure the output directory exists; the original open() raised
    # FileNotFoundError on a fresh checkout without data/.
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    write_header = not os.path.exists(filename)
    # Single open instead of two: removes the race between the existence
    # check and the header write, and halves the file operations.
    with open(filename, 'a', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)
        keys = sorted(item.keys())
        if write_header:
            writer.writerow(keys)
        writer.writerow([item[key] for key in keys])


def get_html(keyword, start, end):
    """Fetch the fully rendered HTML of a Weibo hot-sorted search page.

    *keyword* is double URL-quoted (Weibo's search path expects a quoted
    keyword embedded in an already-quoted URL segment); *start*/*end* are
    hour-granular timescope bounds like ``2018-04-01-0``.

    Returns the page source with newlines and tabs stripped.
    """
    url = "https://s.weibo.com/weibo/{}&xsort=hot&typeall=1&suball=1&timescope=custom:{}:{}&Refer=g".format(request.quote(request.quote(keyword)), start, end)
    chrome_options = Options()
    # chrome_options.add_argument('--headless')
    # prefs = {"profile.managed_default_content_settings.images": 2}
    # chrome_options.add_experimental_option("prefs", prefs)
    driver = webdriver.Chrome(chrome_options=chrome_options)
    try:
        driver.get(url)
        # Re-use a previously saved login session; search results require it.
        with open('login/cookies/weibo_cookies.txt', encoding="utf-8") as f:
            cookies_list = json.loads(f.read())
        for cookies in cookies_list:
            driver.add_cookie(cookie_dict=cookies)
        driver.refresh()
        driver.implicitly_wait(10)
        # Click every "展开全文" (show full text) link so the expanded
        # feed_list_content_full paragraphs are present in the DOM.
        full_btns = driver.find_elements_by_xpath('//a[contains(text(),"展开全文")]')
        for full_btn in full_btns:
            full_btn.click()
        time.sleep(1)
        html = driver.page_source
    finally:
        # quit() (not close()) also tears down the chromedriver process, and
        # the finally block guarantees no browser is leaked when any step
        # above raises (missing cookie file, stale element, timeout, ...).
        driver.quit()
    return html.replace('\n', '').replace('\t', '')


def parse(html, keyword):
    weibo_list = []
    response = etree.HTML(html)
    items = response.xpath('//*[@id="pl_feedlist_index"]//div[@class="card-wrap"]')
    for item in items:
        data = {}
        nickname = item.xpath('.//a[@nick-name]/@nick-name')[0]
        content = ''.join(item.xpath('.//p[@node-type="feed_list_content_full"]//text()'))
        if not len(content):
            content = ''.join(item.xpath('.//p[@node-type="feed_list_content"]//text()'))
        forward = ''.join(item.xpath('.//a[@action-type="feed_list_forward"]/text()'))
        cmt = ''.join(item.xpath('.//a[@action-type="feed_list_comment"]/text()'))
        agree = ''.join(item.xpath('.//a[@action-type="feed_list_like"]//text()'))
        postdate = ''.join(item.xpath('.//a[contains(@suda-data,"wb_time")]/text()')).strip()
        device = ''.join(item.xpath('.//div[@class="content"]//a[@rel="nofollow"]/text()'))
        data = {
            'keyword': keyword,
            'nickname': nickname,
            'content': optimize_str(content),
            'forward': 0 if forward == '' else int(forward.replace('转发', '').strip()),
            'cmt': 0 if cmt == '' else int(cmt.replace('评论', '').strip()),
            'agree': 0 if agree == '' else int(agree),
            'postdatetime': postdate,
            # 'postdate': postdate[:10].replace('-', ''),
            'device': device,
            'updatetime': datetime.datetime.now().strftime('%Y%m%d%H%M%S'),
            # 'content_md5': md5Encode(optimize_str(content))
        }
        weibo_list.append(data)

    return [dict(t) for t in set([tuple(d.items()) for d in weibo_list])]


def get_date_list(begin_date, end_date):
    """Return every calendar day from *begin_date* to *end_date* inclusive,
    as 'YYYY-MM-DD' strings. Empty list if the range is reversed.
    """
    start = datetime.datetime.strptime(begin_date, "%Y-%m-%d")
    stop = datetime.datetime.strptime(end_date, "%Y-%m-%d")
    span = (stop - start).days
    return [(start + datetime.timedelta(days=offset)).strftime("%Y-%m-%d")
            for offset in range(span + 1)]


if __name__ == '__main__':
    # Scrape every keyword in keywords.txt, one day at a time, over the
    # configured date range, printing the parsed posts for each page.
    begin_date = "2018-04-01"
    end_date = "2018-04-30"
    date_list = get_date_list(begin_date, end_date)
    with open('keywords.txt', encoding='utf-8') as f:
        # Strip whitespace and drop blank lines so a trailing newline in
        # keywords.txt does not trigger a search for an empty keyword.
        keywords = [line.strip() for line in f if line.strip()]
    for keyword in keywords:
        for date in date_list:
            # Weibo's timescope is hour-granular: -0 .. -23 covers the day.
            start = date + '-0'
            end = date + '-23'
            html = get_html(keyword, start, end)
            weibo_list = parse(html, keyword)
            print(weibo_list)
