Python Web Scraping in Practice 1 | Dianping (2)

1. Discover the URL pattern

generate_urls builds the URL for each listing page on Dianping

def generate_urls(limit=50):
	urls=[]
	template='http://www.dianping.com/shanghai/hotel/p{page}'
	for p in range(1,limit+1):
		url=template.format(page=p)
		urls.append(url)
	return urls
generate_urls(limit=50)
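
A quick sanity check: the list is fully determined by the template, so the first and last URLs are easy to verify.

urls=generate_urls(limit=50)
print(urls[0])   # http://www.dianping.com/shanghai/hotel/p1
print(urls[-1])  # http://www.dianping.com/shanghai/hotel/p50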

2. Fetch a single page

get_html fetches the HTML (page source) for a given url

import requests
def get_html(url):
	headers={"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36"}
	resp=requests.get(url,headers=headers)
	html=resp.text
	return html
	
url='http://www.dianping.com/shanghai/hotel/p1'
get_html(url) 
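
Network requests can time out or come back with an error status. A slightly hardened fetcher (a sketch; get_html_safe, the timeout, and the retry count are assumptions, not part of the original tutorial) might look like this:

import time
import requests

def get_html_safe(url,retries=3):
    """Fetch url with a timeout, an HTTP status check, and a simple retry loop.
    get_html_safe is a hypothetical variant, not from the original post."""
    headers={"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36"}
    for attempt in range(retries):
        try:
            resp=requests.get(url,headers=headers,timeout=10)
            resp.raise_for_status()  # raise on 4xx/5xx responses
            return resp.text
        except requests.RequestException:
            time.sleep(2**attempt)  # back off before retrying
    return ''  # give up after all retries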

3. Parse the data

extract_hotels parses hotel records, as dicts, out of the HTML source

from pyquery import PyQuery

def extract_hotels(html):
	hotels=[]
	doc=PyQuery(html)
	# each .hotel-block node is one hotel listing
	for hotel in doc.items('.hotel-block'):
		hotel_name=hotel('.hotel-name-link').text()
		addr1=hotel('.place a').text()
		addr2=hotel('.walk-dist').text()[1:]  # drop the leading character
		hotel={'hotel_name':hotel_name,
		       'addr1':addr1,
		       'addr2':addr2}
		hotels.append(hotel)  # append inside the loop so every hotel is kept
	return hotels
url='http://www.dianping.com/shanghai/hotel/p1'
html=get_html(url) 
hotels=extract_hotels(html)
print(hotels)
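
Each element of the returned list is a plain dict with the three fields, so printing hotels yields something shaped like this (values elided here, not real scraped data):

# [{'hotel_name': '...', 'addr1': '...', 'addr2': '...'},
#  {'hotel_name': '...', 'addr1': '...', 'addr2': '...'},
#  ...]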

4. Store the data

import csv

path='F:/papapa/data/dianping.csv'
csvf=open(path,'a+',encoding='utf-8',newline='')
fieldnames=['hotel_name','addr1','addr2']
writer=csv.DictWriter(csvf,fieldnames=fieldnames)
writer.writeheader()
# hotels is the list of dicts returned by extract_hotels in step 3
for hotel in hotels:
	writer.writerow(hotel)
csvf.close()
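
The same write-out can be expressed with a with block so the file is closed even if a row fails mid-write; writerows replaces the explicit loop (a minor idiomatic variant, same logic):

import csv

path='F:/papapa/data/dianping.csv'
with open(path,'a+',encoding='utf-8',newline='') as csvf:
    writer=csv.DictWriter(csvf,fieldnames=['hotel_name','addr1','addr2'])
    writer.writeheader()
    writer.writerows(hotels)  # hotels from step 3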

All done!

Repeat steps 2-4 for every page

import requests
import csv
from pyquery import PyQuery

def generate_urls(limit=50):
    
    """
    Build the URL for each listing page on Dianping.
    """
    urls=[]
    template='http://www.dianping.com/shanghai/hotel/p{page}'
    for p in range(1,limit+1):
        url=template.format(page=p)
        urls.append(url)
    return urls


def get_html(url):
    
    """
    Fetch the HTML (page source) for url.
    """
    headers={"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36"}
    resp=requests.get(url,headers=headers)
    html=resp.text
    return html

def extract_hotels(html):
    
    """
    Parse hotel dicts out of the HTML; return a list of dicts.
    html: page source string
    """
    hotels=[]
    doc=PyQuery(html)
    for hotel in doc.items('.hotel-block'):
        hotel_name=hotel('.hotel-name-link').text()
        addr1=hotel('.place a').text()
        addr2=hotel('.walk-dist').text()[1:]
        hotel={'hotel_name':hotel_name,
               'addr1':addr1,
               'addr2':addr2}
        hotels.append(hotel)
    return hotels


def main(file,limit=50):
    print('Start scraping Dianping')
    
    # create the CSV file and write the header
    csvf=open(file,'a+',encoding='utf-8',newline='')
    fieldnames=['hotel_name','addr1','addr2']
    writer=csv.DictWriter(csvf,fieldnames=fieldnames)
    writer.writeheader()
    
    urls=generate_urls(limit)
    for url in urls:
        print("正在采集:{url}".format(url=url))
        html=get_html(url)
        hotels=extract_hotels(html)
        for hotel in hotels:
            writer.writerow(hotel)
    print('Finished scraping Dianping!')
    csvf.close()
# run the scraper
main(file='F:/papapa/data/dianping.csv',limit=50)
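
Sites like Dianping tend to rate-limit or block clients that fetch pages back to back, so in practice a short pause between requests helps. A sketch of the fetch loop inside main with a delay added (the one-second value is an arbitrary assumption):

import time

for url in urls:
    print("Scraping: {url}".format(url=url))
    html=get_html(url)
    hotels=extract_hotels(html)
    for hotel in hotels:
        writer.writerow(hotel)
    time.sleep(1)  # pause ~1 second between pages to stay polite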