Python Crawler: Scraping bilibili Search with the urllib Library

Updated: 2022-10-13 11:47:46

Environment:

PyCharm: Download PyCharm: Python IDE for Professional Developers by JetBrains

Python: Python Releases for Windows | Python.org

Dependencies: after setting up PyCharm, install the libraries the script needs.
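The install step was shown only as a screenshot in the original post. Going by the imports in the source below, the third-party packages are BeautifulSoup (imported from bs4) and xlwt; re, urllib, and time ship with Python, so a pip install along these lines should cover it:

pip install beautifulsoup4 xlwt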

Crawler source code

The script below fetches 50 pages of bilibili search results for the keyword 非诚勿扰 ("If You Are the One"), parses each video entry with BeautifulSoup plus the regular expressions defined near the top, and saves everything to an .xls workbook with xlwt.

from bs4 import BeautifulSoup
import re
import urllib.request, urllib.error
import xlwt
import time
 
def main():
    # The keyword in baseurl is the URL-encoded 非诚勿扰 ("If You Are the One")
    baseurl = "https://search.bilibili.com/video?keyword=%E9%9D%9E%E8%AF%9A%E5%8B%BF%E6%89%B0&page="
    datalist = getData(baseurl)
    savepath = ".\\非诚勿扰.xls"
    saveData(datalist,savepath)
 
 
# Regex patterns for the fields extracted from each video item
findUrl = re.compile(r'<a class="img-anchor" href="(.*?)[?]from')             # video link
findImgSrc = re.compile(r'<a .*? title="(.*?)"',re.S)                         # title
findRB = re.compile(r'<span class="so-imgTag_rb">(.*?)</span>')               # duration
findUpName = re.compile(r'<a class="up-name".*?>(.*?)</a>')                   # uploader
findSoIconTime = re.compile(r'<i class="icon-date"></i>(.*?)</span>',re.S)    # publish date
findWatchNum = re.compile(r'<i class="icon-playtime"></i>(.*?)</span>',re.S)  # view count
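# For reference, a hypothetical fragment in the shape these patterns expect
# (illustrative only, not captured from bilibili):
#   <a class="img-anchor" href="//www.bilibili.com/video/BV1xx411c7yE?from=search">
# findUrl pulls out "//www.bilibili.com/video/BV1xx411c7yE", and getData()
# prepends "https:" to make it a full link.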
 
def getData(baseurl):
    datalist = []
    for i in range(1,51):           # result pages 1-50
        url = baseurl + str(i)
        html = askURL(url)
        time.sleep(0.5)             # throttle requests a little
        # Parse the page and walk every video entry on it
        soup = BeautifulSoup(html,"html.parser")
        for item in soup.find_all('li',class_='video-item matrix'):
            data = []               # all fields for one video
            item = str(item)
         
            Url = re.findall(findUrl,item)[0]
            data.append('https:' + Url)               # link, protocol restored
            ImgSrc = re.findall(findImgSrc,item)[0]
            data.append(ImgSrc)                        # title
            RB = re.findall(findRB,item)[0]
            data.append(RB)                            # duration
            UpName = re.findall(findUpName,item)[0]
            data.append(UpName)                        # uploader
            SoIc = re.findall(findSoIconTime,item)[0].replace("\n","")
            data.append(SoIc.strip())                  # publish date
            Watch = re.findall(findWatchNum,item)[0].replace("\n","")
            data.append(Watch.strip())                 # view count
            datalist.append(data)
 
    return datalist
 
def askURL(url):
    # Spoof a browser User-Agent so the request is not rejected outright
    head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36"
    }
    request = urllib.request.Request(url,headers=head)
    html = ""
    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode("utf-8")
 
    except urllib.error.URLError as e:
        if hasattr(e,"code"):
            print(e.code)
        if hasattr(e,"reason"):
            print(e.reason)
    return html
 
 
def saveData(datalist,savepath):
    book = xlwt.Workbook(encoding="utf-8",style_compression=0)
    sheet = book.add_sheet('非诚勿扰b站视频',cell_overwrite_ok=True)
    # Columns: video link, title, duration, uploader, publish date, view count
    col = ("视频链接","标题","视频时长","up主","发布时间","播放次数")
    for i in range(0,6):
        sheet.write(0,i,col[i])
    # Iterate over what was actually scraped; the original hard-coded
    # range(0,1000) raises IndexError whenever fewer rows come back
    for i in range(len(datalist)):
        print("Row %d" % (i+1))
        data = datalist[i]
        for j in range(0,6):
            sheet.write(i+1,j,data[j])
    book.save(savepath)
 
 
if __name__ == '__main__':
    main()
    print("完成!")