from bs4 import BeautifulSoup
from html.parser import HTMLParser
from html.entities import name2codepoint
import re
import datetime
import time
import urllib.request

class MyHTMLParser(HTMLParser):
    """Collect the page <title> text and the text of every <span> element.

    After ``feed()``:
      * ``titleStr`` holds the text of the (last seen) <title> element;
      * ``dataAll`` holds the concatenated text of all <span> elements.
    """

    def __init__(self):
        super().__init__()
        self.title = False    # True while inside a <title> element
        self.new = False      # True while inside a <span> element
        self.old = False      # kept for compatibility; never set by this class
        self.nextP = False    # True while inside an <li> element
        self.link = False     # kept for compatibility; never set by this class
        self.dataAll = ""     # accumulated text of all <span> elements
        self.titleStr = ""    # text of the <title> element

    def handle_starttag(self, tag, attrs):
        if tag == "title":
            self.title = True
        elif tag == "span":
            self.new = True
        elif tag == "li":
            # Bug fix: the original assigned a local variable ``nextP``
            # instead of the instance attribute, so the flag never changed.
            self.nextP = True

    def handle_endtag(self, tag):
        if tag == "title":
            self.title = False
        elif tag == "span":
            self.new = False
        elif tag == "li":
            # Same local-vs-attribute bug fixed here.
            self.nextP = False

    def handle_data(self, data):
        # Last <title> text chunk wins; <span> text is accumulated.
        if self.title:
            self.titleStr = data
        if self.new:
            self.dataAll += data

## Parse the link to the previous article ("上一篇")
def getNextUrl(_htmlStr):
    """Return the relative URL of the "上一篇" (previous article) link.

    Scans the page's <li> elements for one whose first text node is
    "上一篇：". If the linked title mentions "交易行情" (a daily trading
    report), return its ``href``; otherwise return "".

    Bug fixes vs. the original:
      * the original fell off the end and returned ``None`` when no such
        <li> existed, which crashed the caller (``preUrl + None``);
      * ``.find(...) > 0`` missed a title starting with "交易行情"
        (index 0); ``!= -1`` is the correct "found" test;
      * a missing ``href`` attribute no longer propagates ``None``.
    """
    soup = BeautifulSoup(_htmlStr, "html.parser")
    for li in soup.findAll("li"):
        contents = li.contents
        # Need at least the "上一篇：" label plus the <a> element after it.
        if len(contents) < 2 or contents[0] != "上一篇：":
            continue
        if contents[1].contents[0].find("交易行情") != -1:
            return contents[1].attrs.get("href") or ""
        return ""
    return ""  # no "上一篇" entry on this page

def _extract_field(text, pattern, number_pattern, convert):
    """Find *pattern* in *text* and pull a number out of the matched span.

    Returns ``(value, remainder)`` where *value* is ``convert`` applied to
    the first *number_pattern* match inside the matched span, and
    *remainder* is the text after the match (so subsequent fields are
    searched in document order). On failure prints "没有匹配" (the
    original's diagnostic) and returns ``(None, text)`` unchanged instead
    of crashing on ``match.end()`` like the original did.
    """
    match = re.search(pattern, text)
    if not match:
        print("没有匹配")
        return None, text
    number = re.search(number_pattern, match.group(0))
    value = convert(number.group(0)) if number else None
    return value, text[match.end():]


def getHtmlData(_htmlStr):
    """Parse one daily report page and print its figures as one line.

    Extracts the report date from the <title> and, from the page body, the
    counts (套) and floor areas (平方米) of new-home and second-hand-home
    transactions, in the fixed order they appear in the article text.

    Bug fix vs. the original: a non-matching title printed "没有匹配" and
    then still dereferenced the ``None`` match; we now return early. A
    missing body field yields ``None`` in the output line instead of an
    ``AttributeError``/``NameError``.
    """
    parser = MyHTMLParser()
    parser.feed(_htmlStr)

    # Date like "2018年1月2日" in the page title.
    match = re.search(r"\d\d\d\d年\d+月\d+日", parser.titleStr)
    if not match:
        print("没有匹配")
        return
    t = time.strptime(match.group(0), "%Y年%m月%d日")
    日期 = datetime.date(*t[0:3])

    # The eight figures appear in this exact order in the article text;
    # each extraction continues from where the previous match ended.
    text = parser.dataAll
    新房, text = _extract_field(text, r"新建商品房网签\d+套", r"\d+", int)
    新房面积, text = _extract_field(text, r"面积[0-9\.]+平方米", r"[0-9\.]+", float)
    新住宅网签, text = _extract_field(text, r"住宅网签\d+套", r"\d+", int)
    新住宅网签面积, text = _extract_field(text, r"面积[0-9\.]+平方米", r"[0-9\.]+", float)
    二手房, text = _extract_field(text, r"二手房成交\d+套", r"\d+", int)
    二手房面积, text = _extract_field(text, r"[0-9\.]+平方米", r"[0-9\.]+", float)
    二手住宅, text = _extract_field(text, r"住宅成交\d+套", r"\d+", int)
    二手住宅面积, text = _extract_field(text, r"[0-9\.]+平方米", r"[0-9\.]+", float)

    # NOTE(review): the last two separators are "," while the rest are ";"
    # — preserved byte-for-byte in case a downstream parser relies on it.
    print(日期, ";", 新房, ";", 新房面积, ";", 新住宅网签, ";", 新住宅网签面积, ";", 二手房, ";", 二手房面积, ",", 二手住宅, ",", 二手住宅面积)

# Starting page of the daily trading-report chain; each page links to the
# previous day's report via its "上一篇" entry.
START_URL = "http://xx.yyfdcw.com/html/news/jyhq/18925.html"
preUrl = "http://xx.yyfdcw.com"


def main():
    """Walk the chain of daily reports, printing one parsed line per page.

    Fetches the start page, then repeatedly follows the relative URL
    returned by ``getNextUrl`` (prefixed with ``preUrl``) until no further
    "上一篇" report link is found.

    Bug fix vs. the original loop: ``getNextUrl`` could return ``None``
    (no matching <li>), which failed the ``url == ""`` check and crashed
    on ``preUrl + None``; ``if not nextPart`` guards both cases. The
    unused ``times`` counter was removed.
    """
    url = START_URL
    while url:
        htmlStr = urllib.request.urlopen(url).read().decode()
        getHtmlData(htmlStr)
        nextPart = getNextUrl(htmlStr)
        if not nextPart:  # "" or None: chain ends here
            break
        url = preUrl + nextPart


if __name__ == "__main__":
    main()


