# coding:utf-8
import time
import re
import MySQLdb
import os

# chdomain='//ch.gongchang.com'
# predomain='//ch.gongchang.com'
# comdomain='//company.ch.gongchang.com'
# mobdomain='//m.ch.gongchang.com'
import requests

def getprourl(pid, cate1=0, province=0, urltype=1, mobile=None):
    '''
    Routing rules: build the SEO product-detail URL and its domain.

    :param pid:      product id (appended to the URL path)
    :param cate1:    top-level category id; 0 means "no category"
    :param province: province/area id; 0 means "no province"
    :param urltype:  requested URL scheme (1=category domain, 2=province
                     domain, 3=generic /gcxinxi path); may be overridden
                     below depending on cate1/province/catetype
    :param mobile:   truthy -> build the m.ch.gongchang.com mobile URL
    :return: (url + ".html", domain), or ('', '') when the category /
             province row is missing.
    '''
    domain = ''
    valid_types = [1, 2, 3]  # renamed: `type` shadowed the builtin
    if province == 0 and urltype == 2:
        urltype = 3
    if cate1 == 0 and urltype == 1:
        urltype = 3
    # anything outside the known set falls back to type 1
    if urltype not in valid_types:
        urltype = 1
    db = MySQLdb.connect(host='172.20.18.27', port=3306, user='seoadmin',
                         passwd='JQExGE2YKe5', db='seo', charset='utf8')
    # try/finally so the connection/cursor are released even on the
    # early '' returns (the old code leaked them on those paths)
    try:
        cur = db.cursor()
        try:
            # parameterized query instead of % string formatting
            cur.execute("SELECT catetype, pinyin, domain FROM cate where cateid = %s",
                        (cate1,))
            data = cur.fetchall()
            if not data:
                return '', ''

            if cate1 == 0 and province == 0:
                urltype = 3

            if cate1 == 0 and province > 0 and urltype == 1:
                urltype = 2
            if cate1 > 0:
                catetype = data[0][0]
                # catetype 5 categories have no dedicated domain -> force generic path
                if province >= 0 and catetype < 5 and urltype == 3:
                    urltype = 1
                elif province >= 0 and catetype == 5 and urltype == 1:
                    urltype = 3

            if urltype == 1:
                # category-domain URL
                if mobile:
                    url = 'https://m.ch.gongchang.com' + '/xinxi' + data[0][1] + '/' + str(cate1) + '-' + str(pid)
                    domain = 'm.ch.gongchang.com'
                else:
                    url = 'https://' + data[0][2] + '/xinxi/' + data[0][1] + str(cate1) + '-' + str(pid)
                    domain = data[0][2]

            elif urltype == 2:
                # province-domain URL; needs the area pinyin
                cur.execute("SELECT pinyin FROM area where id = %s", (province,))
                province_data = cur.fetchall()
                if not province_data:
                    return '', ''

                if mobile:
                    url = 'https://m.ch.gongchang.com' + '/xinxi' + province_data[0][0] + '/' + str(cate1) + '-' + str(pid)
                    domain = 'm.ch.gongchang.com'
                else:
                    url = 'https://' + province_data[0][0] + '//ch.gongchang.com' + '/xinxi/' + str(cate1) + '-' + str(pid)
                    domain = province_data[0][0]

            else:
                # generic /gcxinxi path on the main (or mobile) domain
                if mobile:
                    url = 'https://m.ch.gongchang.com' + '/gcxinxi/' + data[0][1] + str(cate1) + '-' + str(pid)
                    domain = data[0][1]
                else:
                    url = 'https://ch.gongchang.com' + '/gcxinxi/' + data[0][1] + str(cate1) + '-' + str(pid)
                    domain = 'ch.gongchang.com'

            return url + ".html", domain
        finally:
            cur.close()
    finally:
        db.close()

def save_urls():
    '''
    拼接url,每2000条保存到文件夹
    :return:
    '''
    g_db = MySQLdb.connect(host='172.20.18.27', port=3306, user='seoadmin',
                           passwd='JQExGE2YKe5', db='seoproduct', charset='utf8')

    g_cur = g_db.cursor()

    domain_urls = {}    #  url
    domain_num = {}   # 计数
    domain_files = {}   #  域名文件 字典
    for i in range(0, 200):
        data_sql = "SELECT pid, cate1, province, url from pd_info_%s " \
                   "WHERE unix_timestamp(now()) - addtime < 300" % str(i)

        g_cur.execute(data_sql)
        result_data = g_cur.fetchall()
        for result in result_data:
            url, domain = getprourl(result[0], result[1], result[2], result[3])
            if not url:
                continue
            if domain not in domain_urls:
                domain_urls[domain] = []
            domain_urls[domain].append(url)
            domain_num[domain] = domain_num.get(domain, 0) + 1
            num = domain_num[domain]
            if domain not in domain_files:
                file_name = 'pro_%s_0_.txt' % (domain)
                domain_files[domain] = [file_name]

            if (num % 2000 == 0):
                with open(file_name, 'a') as f:
                    f.write('\n'.join(domain_urls[domain]))
                domain_urls[domain] = []
                file_name = 'pro_%s_%s_.txt' % (domain, num)
                domain_files[domain].append(file_name)
    for domain, urls in domain_urls.items():
        if urls:
            with open(domain_files[domain][-1], 'a') as f:
                f.write('\n'.join(urls))
    g_cur.close()
    g_db.close()

    for domain,files in domain_files.items():
        url = "/urls?site=%s&token=W1OQD12hHPMw9UJP" % str(domain)
        for i in files:
            filecontents = {"file": open(i, "rb")}
            r = requests.post("http://data.zz.baidu.com" + url, files=filecontents)
            baiduresult = "推送结果%s \n" % (r.text).encode('utf-8')
            print  baiduresult
            os.remove(i)

if __name__ == '__main__':
    # Daemon loop: push freshly-added product URLs, then wait five minutes.
    while True:
        save_urls()
        time.sleep(300)
