#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import requests
from bs4 import BeautifulSoup
from crawler import config
import os.path
from crawler.db.ImageInfoDBOperator import DBOperator, ImageInfo
from logs.Logger import Logger
import file_md5.FilePrint as file_print
import time
import sys
import urllib3

urllib3.disable_warnings()
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}

def getContext(url):
    """Fetch *url* and return its HTML parsed into a BeautifulSoup tree.

    The response body is decoded as UTF-8 and parsed with the lxml parser.
    TLS certificate verification is disabled (verify=False) — the target
    site apparently does not present a valid certificate.
    """
    response = requests.get(url, headers=headers, verify=False)
    response.encoding = 'utf-8'
    return BeautifulSoup(response.text, 'lxml')


def getLinks(prefix, start, end, pattern='html_data'):
    """Collect thread links from forum index pages *start*..*end* (inclusive).

    Args:
        prefix: index-page URL prefix; the page number is appended to it.
        start: first page number to scan.
        end: last page number to scan; must be >= start.
        pattern: keep only hrefs starting with this prefix. Defaults to
            'html_data' (the static-page link style), so existing callers
            are unaffected.

    Returns:
        A list of relative link strings in page order.

    Exits the process (sys.exit) when end < start, preserving the original
    script's behavior.
    """
    if end < start:
        print("end should be bigger than start")
        sys.exit()
    links = []
    for page in range(start, end + 1):
        print('context page :' + str(page))
        soup = getContext(prefix + str(page))
        for tr in soup.find_all('tr', {'class': 'tr3 t_one'}):
            anchor = tr.find("a")
            # Rows without an <a> tag, or anchors without an href, would
            # otherwise raise AttributeError / TypeError — skip them.
            if anchor is None:
                continue
            link = anchor.get('href')
            if link and link.startswith(pattern):
                links.append(link)
    return links

def getLinks2(prefix, start, end, pattern='read.php'):
    """Collect thread links from forum index pages *start*..*end* (inclusive).

    Identical to getLinks except that it keeps the dynamic-page link style.

    Args:
        prefix: index-page URL prefix; the page number is appended to it.
        start: first page number to scan.
        end: last page number to scan; must be >= start.
        pattern: keep only hrefs starting with this prefix. Defaults to
            'read.php' (the dynamic-page link style), so existing callers
            are unaffected.

    Returns:
        A list of relative link strings in page order.

    Exits the process (sys.exit) when end < start, preserving the original
    script's behavior.
    """
    if end < start:
        print("end should be bigger than start")
        sys.exit()
    links = []
    for page in range(start, end + 1):
        print('context page :' + str(page))
        soup = getContext(prefix + str(page))
        for tr in soup.find_all('tr', {'class': 'tr3 t_one'}):
            anchor = tr.find("a")
            # Rows without an <a> tag, or anchors without an href, would
            # otherwise raise AttributeError / TypeError — skip them.
            if anchor is None:
                continue
            link = anchor.get('href')
            if link and link.startswith(pattern):
                links.append(link)
    return links

if __name__ == '__main__':
    prefix = "https://ll33---www.d0103519.cc/pw/thread.php?fid=106&page="
    link_prefix = "https://ll33---www.d0103519.cc/pw/"
    # Switch between getLinks and getLinks2 depending on the link style
    # the board currently emits:
    #   getLinks  matches: https://<host>/pw/html_data/16/2411/
    #   getLinks2 matches: https://<host>//pw/read.php?tid=7672620&fpage=2
    thread_links = getLinks2(prefix, 1, 100)
    for thread_link in thread_links:
        print(link_prefix + thread_link)
