#!/usr/bin/python3
#coding:utf-8
__author__ = 'merci'
'''
图片代理采集接口
@ uumnt 		uumnt.com
@ mm131 		mm131.com
@ umei 			umei.cc
@ mmonly.cc 	mmonly.cc
'''

from flask import Flask,Blueprint,request,jsonify
from urllib import request as rq
from flask_cors import CORS
from aip import AipNlp
import requests
import ssl
import re
import os
import time
import random
import zlib
app = Flask(__name__)
# Enable cross-origin requests (with credentials) so browser clients on
# other origins can call this API.
CORS(app, supports_credentials=True)

# Public base URL under which saved images are served
uploadurl = 'http://img.chunmeitu.com/uploadfile/'
# Physical directory on disk where downloaded images are written
uploaddir = '/data/www/chunmeitu.com/uploadfile/'
# Per-day subdirectory, e.g. '2024/0131/'.
# NOTE(review): computed once at import time — a long-running process
# keeps writing into the start-date folder; confirm this is intended.
attchmentdir = time.strftime('%Y/%m%d/')

@app.route('/')
@app.route('/index')
def index():
    """Landing route; serves a plain placeholder page."""
    body = 'Default page'
    return body

#爬取图片到本地
@app.route('/getimg', methods=['POST','GET'])
def getImg():
    copyfrom = request.form.get('copyfrom')
    html = request.form.get('url')
    #判断来路
    if copyfrom == "uumnt":
        result = uumnt(html)
    if copyfrom == "mm131":
        result = mm131(html)
    if copyfrom == "mmonly":
        result = mmonly(html)
    if copyfrom == "umei":
        result = umei(html)
    #下载列表图片
    try:
        img = downImg(html, result["img_list"])
        return jsonify({"code":1, "title":result["title"], "img":img})
    except Exception as e:
        print("down failed ! %s" % e)
        return jsonify({"code":0})

def uumnt(html):
    """Scrape a uumnt.com gallery.

    Args:
        html: URL of the gallery's first page (e.g. .../123.html);
              page N is derived as .../123_N.html.

    Returns:
        {"title": gallery title,
         "img_list": [first image URL found on each page]}
    """
    # Site's certificate fails verification under urllib; disable
    # verification. NOTE(review): this mutates process-global SSL state.
    ssl._create_default_https_context = ssl._create_unverified_context

    def _fetch(url):
        # Download one page and transparently un-gzip it when the
        # server declares a gzip Content-Encoding.
        resp = rq.urlopen(url)
        body = resp.read()
        encoding = resp.headers.get('Content-Encoding')
        if encoding and 'gzip' in encoding:
            body = zlib.decompress(body, 16 + zlib.MAX_WBITS)
        return body.decode('utf-8')

    first_page = _fetch(html)
    # Title and total page count: '<h1 class="center">TITLE(1/NN)</h1>'
    page = re.findall(re.compile(r'<h1 class="center">(.*)\(1/(.*)\)</h1>'), first_page)
    img_src = re.compile(r'<img src="(.*)" alt="(.*)" />')

    img_list = []
    for i in range(1, int(page[0][1]) + 1):
        # Page 1 is already fetched; later pages live at <base>_N.html.
        doc = first_page if i == 1 else _fetch(html.replace(".html", "_%s.html" % str(i)))
        img = re.findall(img_src, doc)
        img_list.append(img[0][0])
    return {"title": page[0][0], "img_list": img_list}

def mm131(html):
    """Scrape a mm131.com gallery (pages are GBK-encoded).

    Args:
        html: URL of the gallery's first page; page N is derived as
              <base>_N.html.

    Returns:
        {"title": gallery title,
         "img_list": [first image URL found on each page]}
    """
    def _fetch(url):
        # Download one page and decode it; mm131 serves GBK.
        return rq.urlopen(url).read().decode('gbk')

    first_page = _fetch(html)
    # Total page count: '<span class="page-ch">共N页</span>'
    page = re.findall(re.compile(r'<span class="page-ch">共(.*?)页</span>'), first_page)
    # One (alt, src) pair per page; we keep the src (index 1).
    img_src = re.compile(r'<img alt="(.*)" src="(.*)" />')
    # Gallery title: '<h5>TITLE</h5>'
    title = re.findall(re.compile(r'<h5>(.*)</h5>'), first_page)

    img_list = []
    for i in range(1, int(page[0]) + 1):
        # Page 1 is already fetched; later pages live at <base>_N.html.
        doc = first_page if i == 1 else _fetch(html.replace(".html", "_%s.html" % str(i)))
        img = re.findall(img_src, doc)
        img_list.append(img[0][1])

    return {"title": title[0], "img_list": img_list}

def umei(html):
    """Scrape a umei.cc gallery, collecting one image URL per page.

    Returns {"title": gallery title, "img_list": [image URLs]}.
    """
    first = rq.urlopen(html)
    raw = first.read()
    # Any Content-Encoding header is treated as gzip-wrapped content.
    if first.headers.get('Content-Encoding'):
        raw = zlib.decompress(raw, 16 + zlib.MAX_WBITS)
    text = raw.decode('utf-8')

    # Total page count: '<a>共N页: </a>'
    pages = re.findall(r'<a>共(.*?)页: </a>', text)
    # (alt, src) pairs; the src is what we collect.
    img_pattern = re.compile(r'<img alt="(.*?)" alt="" src="(.*?)" />')
    # Gallery title: '<strong>TITLE</strong>'
    titles = re.findall(r'<strong>(.*?)</strong>', text)

    # Seed with the first page's image, then walk pages 2..N.
    img_list = [re.findall(img_pattern, text)[0][1]]
    for n in range(2, int(pages[0]) + 1):
        # Page n lives at <base>_n.htm
        follow = rq.urlopen(html.replace(".htm", "_%s.htm" % str(n)))
        raw_n = follow.read()
        if follow.headers.get('Content-Encoding'):
            raw_n = zlib.decompress(raw_n, 16 + zlib.MAX_WBITS)
        text_n = raw_n.decode('utf-8')
        img_list.append(re.findall(img_pattern, text_n)[0][1])
    return {"title": titles[0], "img_list": img_list}

def mmonly(html):
    """Scrape a mmonly.cc gallery (GBK-encoded pages), collecting one
    JPG URL per page.

    Returns {"title": gallery title, "img_list": [image URLs]}.
    """
    text = rq.urlopen(html).read().decode('gbk')

    # Total page count: '<a>共N页: </a>'
    pages = re.findall(r'<a>共(.*?)页: </a>', text)
    # Any src="...jpg" attribute on the page
    jpg_pattern = re.compile(r'src="(.+\.jpg)"')
    # Title sits before the picture counter span
    titles = re.findall(r'<h1>(.*?)<span id="picnum">', text)

    # Seed with the first page's image, then walk pages 2..N.
    img_list = [re.findall(jpg_pattern, text)[0]]
    for n in range(2, int(pages[0]) + 1):
        # Page n lives at <base>_n.html
        text_n = rq.urlopen(html.replace(".html", "_%s.html" % str(n))).read().decode('gbk')
        img_list.append(re.findall(jpg_pattern, text_n)[0])
    return {"title": titles[0], "img_list": img_list}

def downImg(referer, img_list):
    """Download each remote image into today's upload directory.

    Args:
        referer:  gallery page URL, sent as the Referer header (the
                  source sites reject hot-linked requests without one).
        img_list: remote image URLs to fetch.

    Returns:
        Public URLs (under `uploadurl`) of the images that were
        actually saved; failed downloads are skipped.
    """
    ssl._create_default_https_context = ssl._create_unverified_context
    # Browser-like headers so the origin servers serve the real image.
    headers = {
    	'accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    	'accept-language':'zh-CN,zh;q=0.9',
    	'cache-control':'no-cache',
    	'cookie':'_uupv=8; UM_distinctid=1662b1555161bf-0aca1011cbd7e7-3c604504-e1000-1662b15551713c; _uupv=1; Hm_lvt_7c6c81d757e82ce847d3456362654eb0=1542553851,1542638969,1542721766,1542807170; CNZZDATA1274675106=1767632047-1538318801-%7C1542808923; Hm_lpvt_7c6c81d757e82ce847d3456362654eb0=1542812430',
    	'pragma':'no-cache',
        'Referer':referer,
        'upgrade-insecure-requests':'1',
        'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
    }
    # Destination directory is date-based and loop-invariant:
    # create it once up front instead of re-checking per image.
    webdir = os.path.join(uploaddir, attchmentdir)
    os.makedirs(webdir, mode=0o777, exist_ok=True)
    # Local URLs of successfully saved images
    li = []
    for p in img_list:
        img = requests.get(p, headers=headers)
        # Timestamp + 3-digit random suffix keeps names unique.
        # BUG FIX: format was "%Y%m%d%H%I%S" — %I is the 12-hour clock,
        # not minutes; %M gives chronologically sortable names.
        filename = time.strftime("%Y%m%d%H%M%S") + str(random.randint(100, 999))
        try:
            with open(webdir + filename + ".jpg", 'wb') as f:
                f.write(img.content)
        except OSError as e:
            # Skip this image but keep going; unlike the original, do
            # not report a URL for a file that was never written.
            print("Image download failed ! %s" % e)
            continue
        li.append(uploadurl + attchmentdir + filename + ".jpg")
    return li

if __name__ == '__main__':
   # Development server entry point. NOTE(review): debug=True enables the
   # interactive werkzeug debugger while binding to 0.0.0.0 — do not run
   # this configuration on a publicly reachable host.
   app.run(host='0.0.0.0', port=5000, debug = True)
