# 标签 (tags): get url 爬虫 find html 新版 li div
# -*- coding:UTF-8 -*-
import io
import os
import re
import sys
import time

import requests
from bs4 import BeautifulSoup
# Base URL of the picture site; all page/image paths are joined onto this.
base_url = "https://www.tupianzj.com/"
def get(url):
    """Fetch *url* with a desktop User-Agent and return the requests Response.

    A fresh session is created per call with keep-alive disabled so that a
    long crawl does not exhaust pooled connections.
    """
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0)Gecko/20100101 Firefox/61.0"}
    # NOTE: module-wide side effect — sets the retry count for every
    # requests HTTP adapter, not just this session.
    requests.adapters.DEFAULT_RETRIES = 5
    s = requests.session()
    s.keep_alive = False
    # Timeout prevents a stalled server from hanging the whole crawl.
    res = s.get(url, headers=headers, timeout=30)
    return res
def get_html(url, encoding="gb2312"):
    """Download *url* and return its body decoded as text.

    The target site serves GB2312-encoded pages, hence the default;
    pass a different *encoding* when scraping another site.
    """
    res = get(url)
    # Override requests' charset guess with the site's actual encoding.
    res.encoding = encoding
    html = res.text
    return html
def get_pic(url, filename):
    """Save the main picture of gallery page *url* into *filename*.

    Returns the total number of pages in the gallery (parsed from the
    pagination block) so the caller can fetch the remaining pages.
    Raises AttributeError if the expected "bigpic" / "pages" elements
    are missing from the page.
    """
    html = get_html(url)
    soup = BeautifulSoup(html, 'html.parser')
    # The featured image lives in <div id="bigpic">.
    div = soup.find("div", id="bigpic")
    img = div.find("img")
    img_src = img.get("src")
    pic = get(img_src)
    with open(filename, 'wb') as f:
        f.write(pic.content)
    # Pagination text is of the form "共N页…"; extract the first run of
    # digits instead of relying on fragile fixed-offset slicing.
    div_1 = soup.find("div", class_="pages")
    li = div_1.find_all("li")
    match = re.search(r"\d+", li[0].text)
    pages = int(match.group()) if match else 1
    return pages
def main():
    """Crawl the gallery index and download every picture set.

    For each gallery linked from the index page, a folder named after the
    gallery title is created and every page's image is saved as <n>.jpg.
    """
    html = get_html(base_url + "meinv/xinggan/")
    soup = BeautifulSoup(html, 'html.parser')
    div = soup.find("div", class_="list_con_box")
    ul = div.find("ul", class_="list_con_box_ul")
    for item in ul.find_all("li"):
        a = item.find("a")
        href = a.get("href")
        title = a.get("title")
        path = u"C:/Users/Administrator/Desktop/MM/{name}".format(name=title)
        # exist_ok avoids crashing when re-running after a partial download.
        os.makedirs(path, exist_ok=True)
        filename = path + "/1.jpg"
        # Page 1 also tells us how many pages the gallery has.
        pages = get_pic(base_url + href, filename)
        # Pages 2..N follow the "<name>_<n>.html" URL pattern
        # (strip the trailing ".html" from href first).
        for j in range(2, pages + 1):
            url = base_url + href[:-5] + "_" + str(j) + ".html"
            filename = path + "/" + str(j) + ".jpg"
            get_pic(url, filename)
        print(title + "下载好了!")
    print("Complete!")


if __name__ == '__main__':
    main()
# 标签 (tags): get,url,爬虫,find,html,新版,li,div
# 来源 (source): https://www.cnblogs.com/wavexu/p/13702185.html
# 本站声明: 1. iCode9 技术分享网(下文简称本站)提供的所有内容,仅供技术学习、探讨和分享; 2. 关于本站的所有留言、评论、转载及引用,纯属内容发起人的个人观点,与本站观点和立场无关; 3. 关于本站的所有言论和文字,纯属内容发起人的个人观点,与本站观点和立场无关; 4. 本站文章均是网友提供,不完全保证技术分享内容的完整性、准确性、时效性、风险性和版权归属;如您发现该文章侵犯了您的权益,可联系我们第一时间进行删除; 5. 本站为非盈利性的个人网站,所有内容不会用来进行牟利,也不会利用任何形式的广告来间接获益,纯粹是为了广大技术爱好者提供技术内容和技术思想的分享性交流网站。