Requirement analysis: we want to scrape the specifications of every phone in the Huawei Vmall store's phone category, skipping the accessories.
1. Press F12, search for any keyword on the page, and find the matching API request.
The API found is:
Request URL: https://openapi.vmall.com/mcp/v1/search/queryPrd
Request Method: POST
Status Code: 200 OK
Remote Address: 121.36.48.86:443
Referrer Policy: strict-origin-when-cross-origin
It is a POST request, so we also need the matching payload and request headers; both appear in the source code below.
2. Click to the next page and notice that the request goes to the same API; only the page number in the payload changes (a quick replay of the request is sketched below).
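As a quick sanity check, the request can be replayed outside the browser. This is only an illustrative sketch, not a verified minimal request: the headers dict below is a placeholder for the full set captured in DevTools (Cookie, CsrfToken, User-Agent, ...), and the payload is trimmed for readability; the exact dicts used in practice are in the source code further down.

import json
import requests

API_URL = "https://openapi.vmall.com/mcp/v1/search/queryPrd"
headers = {'Content-Type': 'text/plain'}   # placeholder: paste the captured request headers (Cookie, CsrfToken, ...) here
payload = {"keyword": 36, "pageNum": 2, "pageSize": 20, "portal": 1,
           "country": "CN", "lang": "zh-CN", "searchFlag": 1}   # trimmed payload; only pageNum changes between pages

r = requests.post(API_URL, data=json.dumps(payload), headers=headers)
result = r.json()
print(result['totalCount'])                               # total number of products
print([p['productId'] for p in result['resultList']])     # product ids on this page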
3. Open a phone's detail page and inspect its request. The string at the end of the detail-page URL matches the productId returned by the API from step 1.
4. Requesting that detail-page URL directly with python requests shows that the response already contains the data we want.
Analysing the returned page shows that when the product is a phone, the parameter list extracted with bs4 has more than 14 elements, while accessories usually have only a handful. So we use a rough rule: if the list has more than 10 elements, treat the product as a phone and scrape it (see the sketch below).
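The phone-vs-accessory check boils down to counting those parameter blocks. A minimal sketch, assuming the detail page keeps the product-parameter-list markup observed above (looks_like_phone is just an illustrative helper name):

import requests
from bs4 import BeautifulSoup

def looks_like_phone(product_id):
    # Phone detail pages carry 14+ parameter blocks; accessories only a handful
    url = 'https://www.vmall.com/product/%s.html' % product_id
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    blocks = soup.find_all('div', class_='product-parameter-list clearfix')
    return len(blocks) > 10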
5. The data can therefore be collected in two steps:
1) Use the API from step 1 to collect the product ids.
2) Request each product's detail page, parse it with bs4, store the matched fields in a dict, and append the dict to a CSV file (a small DictWriter sketch follows).
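The CSV side is the standard csv.DictWriter pattern: pre-fill every expected field with an empty string so each row has the same columns even when a phone omits a spec. A minimal, self-contained sketch (field list shortened; demo.csv is just a throwaway file name):

import csv

fields = ['品牌', '型号', 'CPU核数']            # shortened; the full field list is in the source below
row = dict.fromkeys(fields, '')                 # every column exists even if the page lacks that spec
row.update({'品牌': 'HUAWEI', 'CPU核数': 8})

with open('demo.csv', 'w', encoding='utf-8-sig', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=fields)
    writer.writeheader()
    writer.writerow(row)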
Source code:
import csv
import json
import os
import time

import requests
from bs4 import BeautifulSoup
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en,zh;q=0.9,zh-CN;q=0.8',
'Connection': 'keep-alive',
# Content-Length is filled in automatically by requests, so it is not hardcoded here
'Content-Type': 'text/plain',
'Cookie': '_areacode=CN; locale_MS=zh_CN; HWWAFSESID=4e56b27883f4f885559c; HWWAFSESTIME=1631871756004; euid=e5087a3613b328a3828a561c0bfd5abb1e38ca8b7b6c4b33; deviceid=1dd8e0997f05274b7a7832576bacb775; TID=1dd8e0997f05274b7a7832576bacb775; sdevid=f0c5dd2caeb6deb7070c3d15846c41d263d0b576; salePortal=1; recommendflag=0; ipaddress=%E5%B9%BF%E4%B8%9C%2C%E5%B9%BF%E5%B7%9E%2C%E4%BB%8E%E5%8C%96%E5%B8%82%2C3855; ipaddressId=3853%2C3854%2C3855; cps_id=136118; cps_track_id=1631946786661_gunz7r2ohu9; CSRF-TOKEN=57EB9158F63738A7ECECDADDE2E7F2C564C8B72B8019C92B; device_data=*2k75MldkXzTmzTTFz1NZZ1OZcgcRetZT92wmpTwmzDlWQJMYNNNZOY1WimNYMM0gYYtiz0wy30mpwi34y4w1lNIkcNIQdZdwdb1YMTNddJYIVcUG5lyC05TW0wmTw2EEtyyTmpXD31kN0IZcZBYxIlMUUZVYQ4zy24G29FccAIOZMpzTzW2GTTjFGTDj2DW2kINMMRaYQUcMZMNZ4UKMNUJJNkxmSH4kWGuhT300jEyySio3nSjy5jNNNZOMNMbbVKNNMbZZTWw9Gm1aZFgJYJIY3ikwyl55mvyIzz5040hYakRNNB0NNYQYZgZbJBYMLJULpDj99TWmsGSlpSTMvjmumTT32Wm2ccIkRQNFFMF1AZAZkEGi9mT3mRJEcZ0BMTTTT3TTzD2jDTS22WD3ZMJZNYUNZNckLZJAYb4UbcMIKN2zjzHT1wWmGOFEGD2jh9zjxn2TvoNMMXMMYNcVKJRdPbNT313Dx2HUUNM1JYNJ5jwv41m59yCwo1wxhgPIcYZFMcYEFMdVIJa1JMMxJbYUXi2TClkzW2hCiw0TEuyH2mzDDzxqY5BUAk55VYIl9ZAZEkwmShlDW4YcEBZMYNDXDCDjT2SSjSSG2DDqlZNYMINNZMYNZIMY9pJMUVJZd4k3TT5ljm9NGDzl2Gy3iS99CTzxjVMMMZZMMTYQJMMVPZOl2DymDi9NFeMEzpl6yluzWYMNZNYY4bUSUMD2mh3u2ryuTlWFVVoxdtIYDzWjXQ==',
'CsrfToken': '',
'Host': 'openapi.vmall.com',
'Origin': 'https://www.vmall.com',
'Referer': 'https://www.vmall.com/',
'sec-ch-ua': '"Google Chrome";v="93", " Not;A Brand";v="99", "Chromium";v="93"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-site',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36'
}
payload = { "PersonalizeSearch": 2,
"brandType": 0,
"country": "CN",
"keyword": 36,
"lang": "zh-CN",
"pageNum": 1,
"pageSize": 20,
"portal": 1,
"searchFlag": 1,
"searchId": "1dd8e0997f05274b7a7832576bacb775",
"searchSortField": 0,
"searchSortType": "desc",
"sid": "89cc6077cd6c88a51d9e56fe1157a0bacb195cedd0f45242",
"tid": "1dd8e0997f05274b7a7832576bacb775",
}
# Fields to collect; values are looked up by these labels later (the labels must stay in Chinese to match the page)
mylist = ['品牌','型号','传播名','上市时间','操作系统','CPU型号','CPU核数','CPU主频','GPU','NPU','分辨率','运行内存(RAM)','机身内存(ROM)','电池容量','数据线接口']
# Initialise every field to an empty string: not every phone exposes every spec, so this keeps the CSV columns consistent
UP = dict()
for tkey in mylist:
    UP[tkey] = ''
# Convert a Chinese-numeral core count (e.g. '八核') to an Arabic number; core counts are small, so the mapping can stay simple
def change_num(kernel_num):
    num_dict = {'零': 0, '一': 1, '二': 2, '两': 2, '三': 3, '四': 4, '五': 5, '六': 6, '七': 7, '八': 8, '九': 9, '十': 10}
    num = 0
    for ch in kernel_num:
        if ch in num_dict:        # Chinese digit, e.g. '十' + '二' -> 12
            num += num_dict[ch]
        elif ch.isdigit():        # already an Arabic digit
            num += int(ch)
        # anything else ('核', brackets, ...) is ignored
    return num
def get_product_id(page_num=1):
    '''Query one page of the search API and collect the product ids into pid_list.'''
    global PAGENUM
    APIURL = "https://openapi.vmall.com/mcp/v1/search/queryPrd"
    payload['pageNum'] = page_num
    # verify=False mirrors the captured request and skips TLS verification
    r = requests.post(APIURL, data=json.dumps(payload), headers=headers, verify=False)
    result = json.loads(r.text)
    data = result['resultList']
    PAGENUM = result['totalCount']  # total number of products
    for pro in data:
        pid_list.append(pro['productId'])
    time.sleep(5)  # throttle: do not call the API too often
def get_info(product_id):
    '''Fetch a product detail page and extract the phone parameters.'''
    url = 'https://www.vmall.com/product/%s.html' % product_id
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    # Parameter blocks on the detail page
    para_list = soup.find_all('div', class_='product-parameter-list clearfix')
    if len(para_list) > 10:  # phones usually have 14+ parameter blocks, accessories far fewer
        for tsoup in para_list:
            tpara = tsoup.find_all('li')
            for tinfo in tpara:
                pname = tinfo.find('label').text
                para = tinfo.find('span').text
                if pname in mylist:
                    # store the core count as an Arabic number, everything else verbatim
                    UP[pname] = change_num(para) if pname == 'CPU核数' else para
        print(UP)
        save_file(UP)
    else:
        print(url)  # not a phone, just log the url
    time.sleep(5)
def save_file(UP):
    '''Append one row to the csv file, writing the header when the file is first created.'''
    csv_columns = list(UP.keys())
    try:
        if os.path.exists(csv_file):
            with open(csv_file, 'a', encoding='utf-8-sig') as file:
                writer = csv.DictWriter(file, fieldnames=csv_columns, delimiter=',', lineterminator='\n')
                writer.writerow(UP)
        else:
            with open(csv_file, 'w', encoding='utf-8-sig') as csvfile:
                print("Creating csv file")
                writer = csv.DictWriter(csvfile, fieldnames=csv_columns, delimiter=',', lineterminator='\n')
                writer.writeheader()
                writer.writerow(UP)
    except IOError as err:
        print(err)
if __name__ == '__main__':
    csv_file = "data.csv"
    # CSRF token: it seems to expire, so copy a fresh value from the page manually
    headers['CsrfToken'] = '4C2CBE8A16F79C393C8709CA32E9A6A81C0CD4C2D17B1C5E'
    pid_list = []
    # First call fetches page 1 and, via PAGENUM, the total product count
    get_product_id()
    # print(pid_list)
    total_page = (PAGENUM + 19) // 20  # 20 products per page, rounded up
    for pagenum in range(2, total_page + 1):
        get_product_id(pagenum)
        time.sleep(2)
    for pid in pid_list:
        get_info(pid)
    # print(len(pid_list))