# Scraper: per-district second-hand housing listing counts for Beijing
# (Lianjia, bj.lianjia.com). Prints "district: count" for each district,
# then the city-wide total.
import requests
from bs4 import BeautifulSoup

BASE_URL = 'https://bj.lianjia.com/ershoufang'
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
# Selector for the listing-count <span> on a results page.
TOTAL_SELECTOR = '#content > div.leftContent > div.resultDes.clear > h2 > span'


def _get_soup(page_url):
    """Fetch *page_url* and return the parsed HTML.

    A timeout prevents the script from hanging forever on a dead
    connection, and raise_for_status surfaces HTTP errors instead of
    silently parsing an error page.
    """
    res = requests.get(page_url, headers=HEADERS, timeout=10)
    res.raise_for_status()
    return BeautifulSoup(res.text, 'html.parser')


def _listing_total(soup):
    """Return the listing-count text from a results page, or None if the
    expected element is missing (site layout change / anti-bot page)."""
    spans = soup.select(TOTAL_SELECTOR)
    return spans[0].text if spans else None


def main():
    soup = _get_soup(BASE_URL)
    # The second <dl> in the .position block holds the district filter links.
    districts = soup.select('.position dl:nth-of-type(2) dd div a')
    # Only the first 17 links are actual districts (same cutoff as the
    # original counter-and-break loop).
    for district in districts[:17]:
        district_name = district.text.strip()
        district_link = district['href']
        # hrefs look like '/ershoufang/dongcheng/'; strip the duplicate
        # path segment before appending to BASE_URL.
        if 'ershoufang' in district_link:
            district_link = district_link.replace("ershoufang", '')
        total = _listing_total(_get_soup(BASE_URL + district_link))
        print('{}: {}'.format(district_name, total))
    # City-wide total from the unfiltered listing page.
    total = _listing_total(_get_soup(BASE_URL))
    print('{}: {}'.format('总量', total))


if __name__ == '__main__':
    main()