爬一爬花椒的美女主播

致敬

(本文只是个人的学习记录,墙裂建议访问原文O(∩_∩)O)
http://dwz.cn/4ZkZue

代码

# -*- coding:utf-8 -*-
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re

def filter_liveids(url):
    """Collect the unique live-room IDs linked from a listing page.

    Scans every anchor tag whose href starts with "/l/" (live-room links)
    and pulls the first run of digits out of each href as the live ID.

    Args:
        url: URL of a huajiao.com live listing page.

    Returns:
        A set of live-ID strings (set() deduplicates repeated links).
    """
    html = urlopen(url)
    liveids = set()
    # "html.parser" is the stdlib HTML parser backend for BeautifulSoup.
    bsobj = BeautifulSoup(html, "html.parser")
    # Fixed: original referenced undefined name `bsobi` (NameError).
    for link in bsobj.find_all("a", href=re.compile(r"^(/l/)")):
        if 'href' in link.attrs:
            newpage = link.attrs['href']
            liveid = re.findall(r"[0-9]+", newpage)
            # Guard: an href matching "^/l/" but containing no digits
            # would otherwise raise IndexError on liveid[0].
            if liveid:
                liveids.add(liveid[0])
    return liveids

def get_userid(liveid):
    """Extract the streamer's huajiao user ID from a live-room page.

    Fetches http://www.huajiao.com/l/<liveid> and pulls every 8-digit
    run out of the page's <title> text.

    Args:
        liveid: Live-room ID (string or int) as found by filter_liveids.

    Returns:
        A list of 8-digit ID strings found in the title (re.findall
        result; may be empty if the title holds no 8-digit number).
    """
    html = urlopen("http://www.huajiao.com/" + "l/" + str(liveid))
    bsobj = BeautifulSoup(html, "html.parser")
    text = bsobj.title.get_text()
    # Raw string fixes the invalid "\d" escape in the original
    # (a DeprecationWarning, and a SyntaxWarning on Python 3.12+).
    userid = re.findall(r"\d{8}", text)
    return userid

def get_userdata(userid):
    """Scrape profile details from a streamer's personal page.

    Reads the <div id="userInfo"> element of
    http://www.huajiao.com/user/<userid> and collects:
      - 'Favatar': the avatar image URL (src of the img inside
        the div with class "avatar")
      - 'Fuserid': list of 8-digit ID strings found in the
        p.user_id text (re.findall result)

    Args:
        userid: The streamer's huajiao user ID (string or int).

    Returns:
        A dict with keys 'Favatar' and 'Fuserid'.

    Raises:
        AttributeError: if the page lacks the expected userInfo markup
        (e.g. the user does not exist) — behavior kept from the original.
    """
    html = urlopen("http://www.huajiao.com/user/" + str(userid))
    bsobj = BeautifulSoup(html, "html.parser")
    data = dict()
    userinfoobj = bsobj.find("div", {"id": "userInfo"})
    data['Favatar'] = userinfoobj.find('div', {"class": "avatar"}).img.attrs['src']
    userid = userinfoobj.find("p", {"class": "user_id"}).get_text()
    # Raw string fixes the invalid "\d" escape in the original.
    data['Fuserid'] = re.findall(r"\d{8}", userid)
    return data
    原文作者:_weber_
    原文地址: https://www.jianshu.com/p/b3af13525ce5
    本文转自网络文章,转载此文章仅为分享知识,如有侵权,请联系博主进行删除。
点赞