
Multi-threaded crawling of 趣图网

Date: 2018-08-11 19:43:06


# coding=utf-8
import urllib
import urllib2
import urlparse
import re
import bs4
import requests
import cookielib
from lxml import etree
from bs4 import BeautifulSoup
import os
import sys
import ssl
import threading
import time

reload(sys)
sys.setdefaultencoding('utf-8')

url = '/article/list/?page=2'
hdr = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
       'Accept-Language': 'zh-CN,zh;q=0.8',
       'Connection': 'keep-alive',
       'Referer': '',
       'Upgrade-Insecure-Requests': '1',
       # 'Host': '',
       'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36'}

c = cookielib.LWPCookieJar()
cookie = urllib2.HTTPCookieProcessor(c)
# build an opener that sends the cookie jar with every request
opener = urllib2.build_opener(cookie)


# fetch the HTML of a page
def getHtml(url):
    request = urllib2.Request(url, headers=hdr)
    response = opener.open(request)
    text = response.read()
    return text


'''
Collect the links of every image group on a list page.
Input:  /article/list/?page=2
Output: links such as /article/detail/1923030
'''
def get_img_url(url):
    html = getHtml(url)
    soup = BeautifulSoup(html, 'lxml')
    # find every <a> tag that wraps an image group
    all_a = soup.find_all('a', class_='list-group-item random_list')
    '''
    <a href="/article/detail/6424222" class="list-group-item random_list"
    <div class="random_title">我们是谁?(程序员版本)
    <div class="date">-08-19</div></div>
    '''
    # all_a holds the image-group links
    all_img_url = []
    for a in all_a:
        # a['href'] : /article/detail/2904682
        all_img_url.append(a['href'])
    return all_img_url


# Given a detail link such as /article/detail/2904682,
# save its images into a folder named after the group title.
def save_img(url):
    html = getHtml(url)
    '''
    <div class="artile_des"><table><tbody><tr><td>
    <a href="/photo/8759971">
    <img src="///large/9150e4e5ly1fio901ka7jj20c80c8glz.jpg"
         alt="我敬往事一杯酒,当初眼瞎爱过狗"
    '''
    soup = BeautifulSoup(html, 'html.parser')
    '''
    The title, which is also the folder name:
    <li class="list-group-item"><div class="pic-title"><h1>
    <a href="/article/detail/2904682">敬你一杯</a></h1>
    '''
    title = soup.select('li.list-group-item > div.pic-title > h1 > a')[0].get_text()
    # links of every image in the group
    all_img_urls = []
    img_srcs = soup.select("div.artile_des > table > tbody > tr > td > a > img")
    for i in img_srcs:
        all_img_urls.append('https:' + i['src'])
    if not os.path.exists('imgs/' + title):
        os.makedirs('imgs/' + title)
    # number the files to keep the images in order
    img_num = 0
    for i in all_img_urls:
        # file name
        name = '%d_' % img_num + i.split('/')[-1]
        path = 'imgs/' + title + '/' + name
        print path
        with open(path, 'wb') as f:
            f.write(getHtml(i))
        img_num += 1


'''
Given url = '/article/list/?page=2',
save every image group on that page into its own folder.
'''
def save_page_imgs(url):
    img_urls = get_img_url(url)
    for i in img_urls:
        print i
        save_img(i)


url = '/article/detail/6424222'
# save_img(url)
# cnt = 0
# for i in range(2, 100):
#     try:
#         url = '/article/list/?page=%d' % i
#         save_page_imgs(url)
#     except Exception, e:
#         print(e)
#
# print cnt


class Craw(threading.Thread):
    def __init__(self, id, urls):
        threading.Thread.__init__(self)
        self.urls = urls
        self.id = id

    def run(self):
        for i in self.urls:
            try:
                print 'crawler %d is fetching %s' % (self.id, i)
                save_page_imgs(i)
            except Exception, e:
                print e


url_group = []
# pages 2 to 181, ten pages per crawler thread
for i in range(2, 20):
    urls = []
    for j in range(10):
        url = '/article/list/?page=%s' % str((i - 2) * 10 + j + 2)
        urls.append(url)
    url_group.append(urls)

cnt = 0
for urls in url_group:
    craw = Craw(cnt, urls)
    craw.start()
    cnt += 1
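The script above starts one thread per group of ten pages, so eighteen threads hit the site at once. If you want to cap the number of simultaneous requests, the same page range can be driven by a fixed-size thread pool instead. This is only a minimal sketch (Python 3, standard-library concurrent.futures); crawl_page is a helper introduced here, and save_page_imgs is assumed to be the function defined in the script above:

# A sketch only: the same pages 2..181, crawled by at most 8 worker threads.
from concurrent.futures import ThreadPoolExecutor

def crawl_page(page):
    url = '/article/list/?page=%d' % page
    try:
        print('crawling %s' % url)
        save_page_imgs(url)   # function defined in the script above
    except Exception as e:
        print(e)

with ThreadPoolExecutor(max_workers=8) as pool:
    pool.map(crawl_page, range(2, 182))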

Using Python 3 to remove broken downloads

import os

# folder that holds one sub-folder per image group
path = r'D:\code\py\py27\爬取斗图网\imgs'
lists = os.listdir(path)
print(len(lists))


# recursively delete a file or a directory tree
def remove_dir(path):
    if os.path.isfile(path):
        os.remove(path)
    else:
        files = os.listdir(path)
        for i in files:
            remove_dir(path + '//' + i)
        os.removedirs(path)


# drop any image-group folder holding two files or fewer (a failed download)
for i in lists:
    print(i)
    p = path + '\\' + i
    print(p)
    t = os.listdir(p)
    print(len(t))
    if len(t) <= 2:
        remove_dir(p)
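The hand-rolled remove_dir works, but the standard library already ships a recursive delete. A minimal sketch of the same cleanup with shutil.rmtree, assuming the same imgs path and the same "two files or fewer" rule as above:

import os
import shutil

path = r'D:\code\py\py27\爬取斗图网\imgs'

for name in os.listdir(path):
    folder = os.path.join(path, name)
    # a folder with two files or fewer is treated as a failed download
    if os.path.isdir(folder) and len(os.listdir(folder)) <= 2:
        print('removing', folder)
        shutil.rmtree(folder)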

Multi-process version

# coding=utf-8
import urllib
import urllib2
import urlparse
import re
import bs4
import requests
import cookielib
from lxml import etree
from bs4 import BeautifulSoup
import os
import sys
import ssl
import threading
import time

reload(sys)
sys.setdefaultencoding('utf-8')

url = '/article/list/?page=2'
hdr = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
       'Accept-Language': 'zh-CN,zh;q=0.8',
       'Connection': 'keep-alive',
       'Referer': '',
       'Upgrade-Insecure-Requests': '1',
       # 'Host': '',
       'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36'}

c = cookielib.LWPCookieJar()
cookie = urllib2.HTTPCookieProcessor(c)
# build an opener that sends the cookie jar with every request
opener = urllib2.build_opener(cookie)


# fetch the HTML of a page
def getHtml(url):
    request = urllib2.Request(url, headers=hdr)
    response = opener.open(request)
    text = response.read()
    return text


'''
Collect the links of every image group on a list page.
Input:  /article/list/?page=2
Output: links such as /article/detail/1923030
'''
def get_img_url(url):
    html = getHtml(url)
    soup = BeautifulSoup(html, 'lxml')
    # find every <a> tag that wraps an image group
    all_a = soup.find_all('a', class_='list-group-item random_list')
    '''
    <a href="/article/detail/6424222" class="list-group-item random_list"
    <div class="random_title">我们是谁?(程序员版本)
    <div class="date">-08-19</div></div>
    '''
    # all_a holds the image-group links
    all_img_url = []
    for a in all_a:
        # a['href'] : /article/detail/2904682
        all_img_url.append(a['href'])
    return all_img_url


# Given a detail link such as /article/detail/2904682,
# save its images into a folder named after the group title.
def save_img(url):
    html = getHtml(url)
    '''
    <div class="artile_des"><table><tbody><tr><td>
    <a href="/photo/8759971">
    <img src="///large/9150e4e5ly1fio901ka7jj20c80c8glz.jpg"
         alt="我敬往事一杯酒,当初眼瞎爱过狗"
    '''
    soup = BeautifulSoup(html, 'html.parser')
    '''
    The title, which is also the folder name:
    <li class="list-group-item"><div class="pic-title"><h1>
    <a href="/article/detail/2904682">敬你一杯</a></h1>
    '''
    title = soup.select('li.list-group-item > div.pic-title > h1 > a')[0] \
        .get_text().strip()
    # links of every image in the group
    all_img_urls = []
    img_srcs = soup.select("div.artile_des > table > tbody > tr > td > a > img")
    for i in img_srcs:
        all_img_urls.append('https:' + i['src'])
    if not os.path.exists('imgs/' + title):
        os.makedirs('imgs/' + title)
    # number the files to keep the images in order
    img_num = 0
    for i in all_img_urls:
        # file name
        name = '%d_' % img_num + i.split('/')[-1]
        name = name.strip()
        path = 'imgs/' + title + '/' + name
        with open(path, 'wb') as f:
            f.write(getHtml(i))
        img_num += 1


'''
Given url = '/article/list/?page=2',
save every image group on that page into its own folder.
'''
def save_page_imgs(url):
    img_urls = get_img_url(url)
    for i in img_urls:
        print i
        save_img(i)


def start(urls):
    for i in urls:
        try:
            print 'crawling %s' % (i)
            save_page_imgs(i)
        except Exception, e:
            print e


url_group = []
# pages 1 to 500, ten pages per group
for i in range(0, 50):
    urls = []
    for j in range(10):
        url = '/article/list/?page=%s' % str(i * 10 + j + 1)
        urls.append(url)
    url_group.append(urls)

import multiprocessing
import time

if __name__ == "__main__":
    try:
        pool = multiprocessing.Pool(processes=50)
        cnt = 0
        for urls in url_group:
            # the pool keeps at most `processes` workers busy; as one task
            # finishes, the next queued one is scheduled
            pool.apply_async(start, (urls,))
            cnt += 1
        print "Mark~ Mark~ Mark~~~~~~~~~~~~~~~~~~~~~~"
        pool.close()
        # close() must come before join(): after close() no new tasks can be
        # submitted, and join() waits for every worker process to finish
        pool.join()
        print "finished"
    except Exception, e:
        print e
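Here the pool size (50 processes for 50 URL groups) is doing the grouping work; submitting one task per page lets a much smaller pool stay busy. This is only a minimal sketch of the same crawl with multiprocessing.Pool.map; crawl_page is a helper introduced here, and save_page_imgs from the script above is assumed to be importable by the worker processes:

# A sketch only: pages 1..500, eight worker processes, one task per page.
import multiprocessing

def crawl_page(page):
    url = '/article/list/?page=%d' % page
    try:
        save_page_imgs(url)   # function defined in the script above
    except Exception as e:
        print(e)

if __name__ == '__main__':
    pool = multiprocessing.Pool(processes=8)
    pool.map(crawl_page, range(1, 501))   # blocks until every page is done
    pool.close()
    pool.join()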
