import random
import re
import time
import urllib
import urllib.parse
from datetime import datetime

from bs4 import BeautifulSoup

from db.dbbase import DBBase
from db.models import WeiXinData
#from db.wx_data import insert_weixin_data,get_wx_by_mid
from logger.log import parser
from utils.until_wx import Until

dbbase = DBBase()
until = Until()


'''
URL-encode a keyword
'''
def url_encode(word):
    # 对URL进行编码
    data = {
        '': word,
    }
    resu = urllib.parse.urlencode(data)
    re_g = '.*=(.*)'
    keyword_url = re.match(re_g, resu).group(1)
    return keyword_url

'''
Crawl search results and store article data
'''
def hand_source(adminid,keyword,cookies,):

    x = until.request_add_cookies(cookies)
    page_count = 0
    length = 100
    while page_count < length:
        time.sleep(random.randint(0, 5))
        page_count = page_count + 1
        print('#########这是第' + str(page_count) + '页数据#####')
        keyword_url = url_encode(keyword)
        login_url = 'http://weixin.sogou.com/weixin?query={}&_sug_type_=&sut=1571&lkt=5%2C1497837680560%2C1497837681243&' \
                     's_from=input&_sug_=y&type=2&sst0=1497837681355&page={}' \
                    '&ie=utf8&w=01019900&dr=1'.format(keyword_url, str(page_count))

        data_html = x.get(login_url, headers= until.useragent())
        data_html.encoding = 'utf-8'
        data_html_text = data_html.text
        soup = BeautifulSoup(data_html_text,'lxml')
        try:
            content_ul = soup.find('ul',{'class' :  'news-list'})
            content_li = content_ul.find_all('li')

        except Exception as e:
            data_html_title = soup.find('title').text
            if data_html_title == '搜狗搜索':
                print('出现验证码')
                x = until.request_add_cookies(cookies)
                # 重新获取该页面信息
                data_html = x.get(login_url)
                data_html.encoding = 'utf-8'
                data_html_text = data_html.text
                soup = BeautifulSoup(data_html_text, 'lxml')
                content_ul = soup.find('ul', {'class': 'news-list'})
                content_li = content_ul.find_all('li')
            else:
                print(e)
                dbbase.insert_pagecount_state(keyword=keyword, page=str(page_count), pagecount='0',state='3',excepritoncontent=str(e),type='2')

        for i, li in zip(range(0, len(content_li), 1), content_li):

            url_soup = BeautifulSoup(str(li), 'xml')
            url_s = url_soup.find('a', id='sogou_vr_11002601_title_' + str(i)).get('href')
            url_z = url_s.replace('amp;', '')
            page_content = until.requestGet(cookies='0', url = url_z)

            page_content_soup = BeautifulSoup(page_content, 'lxml')
            # 判断是否出现阅读全文
            check_result = check_all_content(page_content_soup)
            if check_result:
                page_content_text = until.requestGet(cookies= '0', url=check_result)
                page_content_soup = BeautifulSoup(page_content_text, 'lxml')

            weixin_data = WeiXinData()
            #try:
            try:
                script = page_content_soup.find_all('script')[1]
                script = str(script).strip().replace('\n', '')
                re_g = '.*var mid =.*(\d{10})'
                weixin_data.weixin_id = int(re.match(re_g, str(script)).group(1))
            except Exception as e:
                parser.error('解析微信内容页面ID出错:{}, 页面URL{}'.format(e, url_z))
                break
            try:
                weixin_data.weixin_title = until.remove_space(page_content_soup.find('h2', id='activity-name').text)
            except Exception as e:
                weixin_data.weixin_title = 'Exception'
                parser.error('解析微信内容页面标题出错:{}, 页面URL{}'.format(e, url_z))
            try:
                weixin_data.weixin_created = until.remove_space(page_content_soup.find('em', id='post-date').text)
            except Exception as e:
                weixin_data.weixin_created = 'Exception'
                parser.error('解析微信内容页面创建时间出错:{}, 页面URL{}'.format(e, url_z))
            try:
                weixin_data.weixin_created_name = until.remove_space(
                    page_content_soup.find('a', id='post-user').text)
            except Exception as e:
                weixin_data.weixin_created_name = 'Exception'
                parser.error('解析微信内容页面创建人出错:{}, 页面URL{}'.format(e, url_z))
            try:
                weixin_data.weixin_cont = until.remove_space(page_content_soup.find('div', id='js_content').text)
            except Exception as e:
                weixin_data.weixin_cont = 'Exception'
                parser.error('解析微信内容页面文章内容出错:{}, 页面URL{}'.format(e, url_z))
            weixin_data.admin_id = adminid
            weixin_data.searchtime = str(datetime.now())
            weixin_data.weixin_keyword = keyword
            # ------mysql-------
            # res = get_wx_by_mid(weixin_data.weixin_id)
            # if res:
            #     break
            #insert_weixin_data(weixin_data)
            # ------mysql-------
            # --------mongodb---------

            dbdata = dbbase.find_weixin_byid(str(weixin_data.weixin_id))
            if dbdata is None:
                wx_data =  eval('{' + str(weixin_data) + '}')
                dbbase.insert_weixin(wx_data)

            #  --------mongodb---------
        # 判断是否有下一页，没有就停止
        next_page = soup.find('a',{'id':'sogou_next'})
        if next_page == None:
            print('没有下一页，采集结束')
            return
'''
Check whether the page shows a "click to read full content" teaser
'''
def check_all_content(page_content_soup):
    try:
        page_clic_a = page_content_soup.find('a', {'id': 'js_share_source'})['href']
        return page_clic_a
    except:
        return False

if __name__=='__main__':


    cookies = [{'value': 'C698E2DD232C940A00000000595C5299', 'name': 'SUID', 'expiry': 2129942690.412663, 'path': '/',
                'secure': False, 'httpOnly': False, 'domain': '.weixin.sogou.com'},
               {'value': '0|1499222681|v1', 'name': 'ABTEST', 'expiry': 1501814690.412553, 'path': '/', 'secure': False,
                'httpOnly': False, 'domain': 'weixin.sogou.com'},
               {'value': '18-28814001-AVlcUv63qNoIKLw7N6jpjZA', 'name': 'sgid', 'expiry': 1500432391.261185,
                'path': '/', 'secure': False, 'httpOnly': False, 'domain': '.sogou.com'},
               {'value': 'CN3201', 'name': 'IPLOC', 'expiry': 1530758690.412625, 'path': '/', 'secure': False,
                'httpOnly': False, 'domain': '.sogou.com'}, {
                   'value': '5|1499222782|1500432382|dHJ1c3Q6MToxfGNsaWVudGlkOjQ6MjAxN3x1bmlxbmFtZToxMzpTaWxseSUyMGZvcmNlfGNydDoxMDoxNDk5MjIyNzgyfHJlZm5pY2s6MTM6U2lsbHklMjBmb3JjZXx1c2VyaWQ6NDQ6bzl0Mmx1STFkNlV3MlZzbVpveW1KejBtYmpJb0B3ZWl4aW4uc29odS5jb218',
                   'name': 'ppinf', 'expiry': 1500432391.261071, 'path': '/', 'secure': False, 'httpOnly': False,
                   'domain': '.sogou.com'},
               {'value': 'C698E2DD3921940A00000000595C529A', 'name': 'SUID', 'expiry': 2129942690.804729, 'path': '/',
                'secure': False, 'httpOnly': False, 'domain': '.sogou.com'},
               {'value': '1', 'name': 'weixinIndexVisited', 'expiry': 1507862691, 'path': '/', 'secure': False,
                'httpOnly': False, 'domain': 'weixin.sogou.com'},
               {'value': '00E75AA0DDE298C6595C529BAADE2832', 'name': 'SUV', 'expiry': 1814582692.57767, 'path': '/',
                'secure': False, 'httpOnly': False, 'domain': '.sogou.com'}, {
                   'value': 'igj3BXQxsbfwXVhtxaKy1o_o67RyW1zLgYQ-c7OaPSA9ORajCQqbNDltLWp4cXeCQLHr-FILKcpnLyLwFuEAvj1CZ77ujiEwfC-MMVUtrzlZ0jxnFwPxaoImwDJPiDkKss3P9OonpkymHDgNKk1qaBFf0oux03ngvkG-DrWUOdo',
                   'name': 'pprdig', 'expiry': 1500432391.26114, 'path': '/', 'secure': False, 'httpOnly': False,
                   'domain': '.sogou.com'},
               {'value': '14992227820000008699c600d18deb4d7e565f0b785881e1', 'name': 'ppmdig', 'path': '/',
                'httpOnly': True, 'secure': False, 'domain': 'weixin.sogou.com'}]

    hand_source('test','药品',cookies,)