# coding=utf-8

# Scrape a WeChat article and store it in MySQL
# (NOTE: previous comment said "get access_token" — appears stale)
import getopt
import json
import urllib2

import re

import MySQLdb
import oss2
import requests
import sys
from bs4 import BeautifulSoup

# Python 2: force UTF-8 as the default str encoding so Chinese text survives
# the implicit str/unicode conversions done throughout this script.
reload(sys)
sys.setdefaultencoding('utf-8')

# MySQL connection settings.
# NOTE(review): credentials are hard-coded in source; move to config/env.
Host = "118.31.32.197"
Port = 3306
User = "gmj"
Passwd = "123456"
Db = "worldcup"

# Module-level connection/cursor shared by the whole script.
conn = MySQLdb.connect(host=Host, port=Port, user=User, passwd=Passwd, db=Db, charset="utf8")
cursor = conn.cursor(cursorclass=MySQLdb.cursors.DictCursor)

# Sogou WeChat search base URL (not used in this visible chunk).
BASE_URL = 'http://weixin.sogou.com'

# Browser-like User-Agent; WeChat pages are fetched with this header set.
UA = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"

# Aliyun OSS credentials and target bucket for re-hosting article images.
# NOTE(review): access keys are hard-coded in source; rotate and externalize.
AccessKeyId = 'LTAIv4prXZw4ElHB'
AccessKeySecret = 'POaKwz4kFzpFxfnfbBbU2xAILJ8APg'
Endpoint = 'oss-cn-beijing.aliyuncs.com'
Bucket = 'worldcuptemp'
downloadRetryTimes = 3
uploadRetryTimes = 3

# Module-level OSS client used by upload().
auth = oss2.Auth(AccessKeyId, AccessKeySecret)
bucket = oss2.Bucket(auth, Endpoint, Bucket)

def download(url, retry_time):
    """GET *url* and return the requests Response on HTTP 200.

    Retries recursively while retry_time < downloadRetryTimes; returns
    None when all attempts fail.
    """
    resp = requests.get(url)
    if resp.status_code == 200:
        return resp
    if retry_time < downloadRetryTimes:
        # BUGFIX: the recursive retry's result was previously discarded,
        # so a successful retry still returned None to the caller.
        return download(url, retry_time + 1)
    return None


def upload(input, article_name, index, retryTime):
    """Store an image body in OSS under article/<article_name>/<index>.jpg.

    Returns the OSS object name on success; retries while
    retryTime < uploadRetryTimes, then returns None.
    """
    name = "article/" + article_name + "/" + str(index) + ".jpg"
    result = bucket.put_object(name, input)
    if result.status == 200:
        return name
    if retryTime < uploadRetryTimes:
        # BUGFIX: the retry previously called upload(input, retryTime+1),
        # dropping article_name/index (a TypeError), and its result was
        # never returned.
        return upload(input, article_name, index, retryTime + 1)
    return None



def parse_essay(link):
    """Fetch a WeChat (mp.weixin.qq.com) article, mirror its images to OSS,
    and return (content, name, date, survey) -- or None on any failure.

    content: HTML of the #js_content element with image URLs rewritten to
             the OSS bucket and "data-src" renamed to "src".
    name:    article title, single quotes doubled (SQL-style escaping) and
             newlines removed.
    date:    publish-date text as found on the page.
    survey:  comma-joined plain text of leading paragraphs (stops past ~500 chars).
    """
    print "start downloading article..."
    print "url = " + link
    s = requests.Session()
    # Browser-like User-Agent for the WeChat page request.
    s.headers.update({"User-Agent": UA})
    try:
        r = s.get(link)
        html = r.text
        soup = BeautifulSoup(html, "html.parser")
        # Article title lives in the #activity-name element.
        name = str(soup.select('#activity-name')[0].text)
        name = str.strip(name)
        # Double single quotes for the later string-built SQL INSERT,
        # and strip newlines.
        name = name.replace('\'', '\'\'')
        name = name.replace('\n', '')
        print "article name = " + name
        # Some pages expose #post-date, others #publish_time; try both.
        try:
            date = soup.select('#post-date')[0].text
        except IndexError:
            date = soup.select('#publish_time')[0].text
        print "data = " + date
        content = str(soup.select("#js_content")[0])
        # p = re.compile(r'\?wx_fmt.+?\"')

        # Build a short plain-text summary from the article paragraphs.
        ps = soup.select(".rich_media_content")[0].select("p")
        survey = ""

        for p in ps:
            temp_txt = str.strip(str(p.text))
            if temp_txt == "":
                continue
            survey = survey + temp_txt + ","
            if len(survey) > 500:
                break

        print "start upload images..."
        # Collect image URLs from CSS background-image declarations and from
        # <img data-src=...> attributes (WeChat lazy-loads via data-src).
        background_imgs = re.findall('background-image: url\("(.*?)"\);', content)
        img_imgs = soup.select("img")
        imgs = []
        for img_img in img_imgs:
            try:
                src = img_img['data-src']
                imgs.append(src)
            except KeyError:
                # <img> without data-src (e.g. inline/static) -- skip it.
                pass
        for background_img in background_imgs:
            imgs.append(background_img)
        img_index = 0
        for img in imgs:
            # Re-host each image on OSS and point the article HTML at it.
            # NOTE(review): download()/upload() may return None on failure,
            # which raises here and is swallowed by the outer except.
            input = download(img, 3)
            new_img_name = upload(input, name, img_index, 3)
            new_img_url = "https://worldcuptemp.oss-cn-beijing.aliyuncs.com/" + new_img_name
            img_index = img_index + 1
            content = content.replace(img, new_img_url)
                # content = content.replace(str(img), '<script type="text/javascript">showImg("' + str(src) + '");</script>')
            # print img

        # content = str(soup.select("#js_content")[0])
        # Turn the lazy-load attribute into a real src so browsers render images.
        content = content.replace("data-src", "src")
    except Exception:
        # NOTE(review): broad catch hides parse/network errors entirely;
        # the caller only ever sees None with no diagnostic.
        return None
    return content, name, date, survey


# opts, args = getopt.getopt(sys.argv[1:], "", ["url=", "category=", "image="])
# for op, value in opts:
#     if op == "--url":
#         url = value
#     if op == "--category":
#         category = value
#     if op == "--image":
#         survey_image_url = value
# if url == "" or url is None:
#     print "url 为空"
#     sys.exit()
# if category is None:
#     category = ""
# if survey_image_url is None:
#     survey_image_url = ""


url = "https://mp.weixin.qq.com/s/V0scMj8SCn5fEpDTqRtLFw"
survey_image_url=""
category="亚洲杯"
content, name, date, survey = parse_essay(url)



if survey_image_url != "":
    survey_input = download(survey_image_url, 3)
    survey_image_url = upload(survey_input, name, 999999, 3)
# print content
content = content.replace('\'', '\'\'')
content = content.replace('\n', '')
sql = "insert into userModel_article value(null, '{}','{}','{}','{}','{}',0,'{}')".format(str.strip(name), content, date, survey,survey_image_url, category)
print "start insert mysql..."
# print sql
cursor.execute(sql)
cursor.execute("commit")
print "success"
# print str.strip(name)
# print date

