#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Reference: https://blog.csdn.net/liujiayu2/article/details/86007384

import json
import openpyxl
import pandas as pd
import requests
from bs4 import BeautifulSoup
from openpyxl.utils.dataframe import dataframe_to_rows
import time
import re  # regular expressions
session = requests.session()

# ========================= generate urls ==========================

def generate_all_page_url(group, num):  # 生成url,一页25,start每次加25.
    url = 'https://www.douban.com/group/{}/discussion?start={}'
    yield url.format(group, num * 25)

# ========================= ==========================

def update_session():
    # 这里模拟一下请求头，头文件是从浏览器里面抓到的，否则服务会回复403错误，（其实就是服务器做的简单防爬虫检测）
    headers = {
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0',
        'Content-Encoding': 'br',
        'Content-Type': 'text/html; charset=utf-8',
        'Cookie': 'ga=GA1.2.1827418288.1539347605; __utmv=30149280.7165; douban-profile-remind=1; _pk_id.100001.8cb4=c3bb3082b8065555.1539347604.2.1539398612.1539347643.; gr_user_id=6dec4a4a-3c5e-4001-9864-48ca57265546; douban-fav-remind=1; __utmc=30149280; __utma=30149280.1827418288.1539347605.1548322624.1549869886.8; push_doumail_num=0; bid=HRYtsLFds7Q; dbcl2="71657042:XIUc0qJexp0"; ck=_0I8; push_noty_num=0; ap_v=0,6.0'
    }
    session.headers.clear()
    session.headers.update(headers)

def get_all_page_urls(page_url):  # 分析url解析出每一页的详细url
    update_session()
    res = session.get(page_url)

    # res = requests.get(page_url, 'lxml')
    if res.status_code == 200:
        soup = BeautifulSoup(res.text, 'html.parser')

        urls = []
        infos = soup.find_all('div', attrs={'class': 'info clear'})
        for a in infos:
            url = a.a.attrs['href']
            urls.append(url)
        return urls

def contains_in(text) :
    if (('男' in text ) or ('育新' in text)) :
        return True
    else :
        return False

def between85to91(text) : 
    content = text.replace('\n', '').replace('\r', '').replace(" ", "")
    rs = re.findall(r'\d+', content)
    print("rs:" + content + " ==>" + str(rs))
    for s in rs :
        age = int(s)
        print("age:" + str(age) + "=>" + str(age > 85 and age < 91))
        if (age > 85 and age < 91) or (age > 1985 and age < 1991) :
            return True
    return False

def get_page_by_url(page_url):  # 分析详细url获取所需信息
    strtime = time.strftime("%m-%d", time.localtime())
    print("get_page_by_url:" + strtime +" "+ page_url)
    update_session()
    try:
        res = session.get(page_url, timeout=(30, 120))

        # res = requests.get(page_url)
        if res.status_code == 200:
            info = {}
            soup = BeautifulSoup(res.text, "lxml")
            #print("soup:"+str(soup))

            for ul in soup.find_all('table', attrs={'class': 'olt'}):
                for li in ul.find_all('td', attrs={'class': 'title'}):
                    #print("time:"+str(li.parent.text))
                    text= li.text
                    
                    if strtime not in str(li.parent.contents[7].text) :
                        print("not today:"+ str(li.parent.contents[7].text))
                        break
                    if contains_in(text) :
                        url = li.a.attrs['href']
                        #print("url:" + url+ " text:" + str(li.text).strip())
                        title = li.text.strip()
                        if between85to91(title) :
                            info['text'] = title
                            info['url'] = url
            print("info:" + str(info))
            return info
    except Exception as e:
        print(str(e))
    return None

def writer_to_text(list):  # 储存到text
    with open('豆瓣.text', 'a', encoding='utf-8')as f:
        f.write(json.dumps(list, ensure_ascii=False) + '\n')
        f.close()

def main(url):
    info = get_page_by_url(url)
    if info == {}:
        return
    writer_to_text(info)  # 储存到text文件
    time.sleep(1)
    
if __name__ == '__main__':
    #https://www.douban.com/group/641424/discussion?start={}
    #https://www.douban.com/group/679445/discussion?start=50
    #https://www.douban.com/group/10658/discussion?start=50
    group={'641424', '679445', 'ganji', '10658'}
    for g in group:
        for i in range(0, 4):
            for url in generate_all_page_url(g, i):
                main(url)
            time.sleep(3)

