#coding=utf-8
import logging
import os
import re
import threading
import time
from urllib import parse

import requests
from bs4 import BeautifulSoup

#from org.db.connectDb import insertZol;


# Shared crawler state.  `urls` holds detail pages already visited;
# `urls_image_html` is the queue of full-resolution image pages waiting to
# be downloaded.  Both are shared between the main thread and the exc()
# worker loop.
urls = set()
urls_image_html = set()

# Local directory where downloaded wallpapers are written.
file_root_path = "F:/tmp/1/"
def parseHtml(url):
    """Fetch a wallpaper listing page and collect its detail-page links.

    Parameters:
        url: listing page URL, e.g. "http://desk.zol.com.cn/1920x1080/".

    Returns:
        List of absolute URLs of wallpaper detail pages ("/bizhi/..." links).
        The original version computed these links and silently discarded
        them; returning them makes the function usable (callers that ignored
        the previous None return are unaffected).
    """
    response = requests.get(url)
    # The site serves gb2312-encoded pages; decode explicitly.
    response.encoding = 'gb2312'
    # response.text is already str, so BeautifulSoup's from_encoding would
    # be ignored (and warn) -- omit it.
    soup = BeautifulSoup(response.text, 'html.parser')
    links = soup.find_all("a", href=re.compile(r"/bizhi/\w"))
    return [parse.urljoin(url, link["href"]) for link in links]


def parseDetailHtml(url):
    """Crawl a wallpaper detail page and, recursively, the pages it links to.

    Side effects on module state:
      - adds *url* to the visited-set ``urls``;
      - adds the page's 1920x1200 image-page link to ``urls_image_html``
        (consumed by the exc() worker loop);
      - recurses into every unvisited "/bizhi/..." link, skipping the
        mobile site (sj.zol.com.cn).

    Parameters:
        url: absolute URL of a zol.com.cn wallpaper detail page.
    """
    urls.add(url)

    response = requests.get(url)
    response.encoding = 'gb2312'  # site pages are gb2312-encoded
    # response.text is str; from_encoding would be ignored, so omit it.
    soup = BeautifulSoup(response.text, 'html.parser')

    imageDetail = {}

    title = soup.find("a", id="titleName")
    if title:
        imageDetail["title"] = title.get_text()

    # The anchor with id "1920x1200" points at the full-resolution page.
    for link in soup.find_all("a", id="1920x1200"):
        href = parse.urljoin(url, link['href'])
        if href not in urls_image_html:
            imageDetail["href"] = href
            # Queue the image page for the exc() downloader thread.
            urls_image_html.add(href)

    # Follow sibling detail pages (raw string fixes the \w escape warning).
    for link in soup.find_all("a", href=re.compile(r"/bizhi/\w")):
        href = link["href"]
        if not href.startswith("http://sj.zol.com.cn"):
            href = parse.urljoin(url, link['href'])
            if href not in urls:
                # NOTE(review): unbounded recursion -- a deep crawl could
                # hit Python's recursion limit; an explicit worklist would
                # be safer, kept recursive to preserve traversal order.
                parseDetailHtml(href)


def parseImageHtml(url):
    """Fetch a full-resolution image page and save the image it displays.

    Parameters:
        url: URL of a "showpic" page whose <img> tag holds the .jpg source.
    """
    response = requests.get(url)
    response.encoding = 'gb2312'  # site pages are gb2312-encoded
    # response.text is str; from_encoding would be ignored, so omit it.
    soup = BeautifulSoup(response.text, 'html.parser')
    # BUG FIX: the original pattern "\w.jpg" left the dot unescaped, so it
    # matched ANY character before "jpg" (e.g. "abjpg").  Escape the dot so
    # only genuine ".jpg" sources match.
    link = soup.find("img", src=re.compile(r"\w\.jpg"))
    if link:
        saveImage(link["src"], formatName(url))

def downloadImage(imagepath):
    """Download *imagepath* and return the raw body bytes.

    The connection is released even if reading the body raises -- the
    original leaked the connection on a mid-transfer failure.
    """
    response = requests.get(imagepath, stream=True)
    try:
        # .content reads the whole body; stream=True just defers the read
        # until here.
        return response.content
    finally:
        response.close()

def saveImage(imagepath, filename):
    """Download *imagepath* and store it under ``file_root_path``.

    The target path is ``<file_root_path><filename>.<suffix>``; if that
    file already exists nothing is downloaded or written.

    Parameters:
        imagepath: direct URL of the image.
        filename: base name (no directory, no extension) for the saved file.
    """
    filepath = "".join([file_root_path, filename, ".", formatSuffix(imagepath)])
    # Check BEFORE downloading -- the original fetched the full image bytes
    # first and only then noticed the file existed, wasting bandwidth.
    if os.path.exists(filepath):
        return
    byte = downloadImage(imagepath)
    logging.info(filename)
    #insertZol(filename,imagepath,filepath)
    # `with` guarantees the handle is closed even if the write raises.
    with open(filepath, "wb") as file:
        file.write(byte)
def formatName(image_html_url):
    """Return the final path segment of *image_html_url* minus its extension.

    Example: ".../showpic/1920x1080_86937_131.html" -> "1920x1080_86937_131".
    Raises ValueError if the URL contains no "/" or no ".".
    """
    name_start = image_html_url.rindex("/") + 1
    ext_start = image_html_url.rindex(".")
    return image_html_url[name_start:ext_start]

def formatSuffix(url):
    """Return the extension of *url*: the text after the last dot.

    Raises ValueError if *url* contains no ".".
    """
    dot = url.rindex(".")
    return url[dot + 1:]

# Store to the database (placeholder).
def addDB(param):
    """Persist *param* to the database (placeholder -- not yet implemented)."""

def exc():
    """Worker loop: drain ``urls_image_html`` and fetch each image page.

    Runs forever.  Each queued page is handled on its own thread so one
    slow download does not block draining the rest of the queue.
    """
    while True:
        if urls_image_html:
            image_html = urls_image_html.pop()
            print(image_html)
            # Public API instead of the private threading._start_new_thread.
            threading.Thread(target=parseImageHtml, args=(image_html,)).start()
        else:
            # Original spun at 100% CPU when the queue was empty; back off
            # briefly while the crawler thread fills it.
            time.sleep(0.1)

if __name__ == "__main__":
    # Run the downloader loop in the background.  daemon=True lets the
    # process exit when the main-thread crawl finishes, matching the old
    # threading._start_new_thread (a private API) behavior.
    threading.Thread(target=exc, daemon=True).start()
    # Seed the crawl from one wallpaper detail page; parseDetailHtml
    # follows further "/bizhi/..." links from there.
    parseDetailHtml("http://desk.zol.com.cn/bizhi/6508_80123_2.html")


