Method 1: threading.Thread

import threading
import requests
from bs4 import BeautifulSoup
import re
import time
import queue

course_url = 'xxxxxxxxxxx'

# compiled pattern (not used below)
pattern = re.compile(r'<a href="(/zuofa/\d+?)" title="(.*?)" class="shipu">')

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.81 Safari/537.36'}


# Alternative: a thread class that subclasses Thread
# class MyThread(threading.Thread):
#     def __init__(self, func, args):
#         super(MyThread, self).__init__()
#         self.func = func
#         self.args = args
#
#     def run(self):
#         self.func(*self.args)


def extra_links(q, f):
    """
    Worker for multithreaded scraping.
    :param q: queue of URLs
    :param f: target file to write results to
    :return:
    """
    while not q.empty():
        try:
            # get_nowait() raises queue.Empty if another thread drained the
            # queue between the empty() check and this call; a blocking get()
            # would hang forever in that case
            url = q.get_nowait()
        except queue.Empty:
            break
        r = requests.get(url, headers=headers)
        if r.status_code == 200:
            soup = BeautifulSoup(r.text, 'lxml')
            course_list = soup.find(class_='main-left')
            links = course_list.find_all(class_='item-tt-link')
            for i in links:
                href = i.attrs['href']
                f.write('http:{} ---->{}\n'.format(href, i.attrs['title']))
        q.task_done()


def main():
    with open('course_link.txt', 'w', encoding='utf8') as f:
        q = queue.Queue()
        for i in range(1, 101):
            q.put('{}?page={}'.format(course_url, i))
        threads = []
        for i in range(20):
            t = threading.Thread(target=extra_links, args=(q, f))
            threads.append(t)
        for t in threads:
            t.start()
        for t in threads:
            t.join()


if __name__ == '__main__':
    s = time.perf_counter()  # time.clock() was removed in Python 3.8
    main()
    print('Run time: %.2f' % (time.perf_counter() - s))
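One thing worth noting about this version: all 20 threads write to the same file object. CPython usually keeps small single writes intact, but the file API makes no thread-safety guarantee, so serializing writes with a threading.Lock is the more defensive pattern. A minimal sketch (write_lock and write_line are illustrative names, not part of the original code):

import threading

write_lock = threading.Lock()  # one module-level lock shared by every worker thread

def write_line(f, line):
    """Hold the lock around each write so lines from different threads never interleave."""
    with write_lock:
        f.write(line)

# inside extra_links, the bare f.write(...) call would become:
#     write_line(f, 'http:{} ---->{}\n'.format(href, i.attrs['title']))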

Method 2: ThreadPoolExecutor from concurrent.futures

import requests
from concurrent.futures import ThreadPoolExecutor
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import time
import re
from bs4 import BeautifulSoup

requests.packages.urllib3.disable_warnings(InsecureRequestWarning)


course_url = ['xxxxxxxxxxx?page={}'.format(i) for i in range(1, 101)]

# compiled pattern (not used below)
pattern = re.compile(r'<a href="(/zuofa/\d+?)" title="(.*?)" class="shipu">')

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.81 Safari/537.36'}


def extra_links(url):
    r = requests.get(url, headers=headers)
    if r.status_code == 200:
        soup = BeautifulSoup(r.text, 'lxml')
        course_list = soup.find(class_='main-left')
        links = course_list.find_all(class_='item-tt-link')
        for i in links:
            href = i.attrs['href']
            # f is the module-level file object opened in the __main__ block below
            f.write('http:{} ---->{}\n'.format(href, i.attrs['title']))


if __name__ == '__main__':
    s = time.perf_counter()  # time.clock() was removed in Python 3.8
    with open('course_link.txt', 'w', encoding='utf8') as f:
        with ThreadPoolExecutor(max_workers=20) as executor:
            # the executor's with-block waits for all submitted tasks to finish
            executor.map(extra_links, course_url)
    print('Run time: %.2f seconds' % (time.perf_counter() - s))
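ThreadPoolExecutor also offers submit/as_completed, which hands back each page's result as soon as it finishes rather than in submission order, and it lets the main thread do all the writing so f is never shared across threads. A minimal sketch reusing the headers and course_url globals from the listing above (extra_links_collect is an illustrative name, not from the original):

import requests
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor, as_completed

def extra_links_collect(url):
    """Same scraping logic as extra_links, but returns the lines instead of writing them."""
    lines = []
    r = requests.get(url, headers=headers)
    if r.status_code == 200:
        soup = BeautifulSoup(r.text, 'lxml')
        for link in soup.find(class_='main-left').find_all(class_='item-tt-link'):
            lines.append('http:{} ---->{}\n'.format(link.attrs['href'], link.attrs['title']))
    return lines

with open('course_link.txt', 'w', encoding='utf8') as f:
    with ThreadPoolExecutor(max_workers=20) as executor:
        futures = [executor.submit(extra_links_collect, url) for url in course_url]
        for future in as_completed(futures):  # results arrive as pages finish, not in order
            f.writelines(future.result())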

Method 3: Pool from multiprocessing.dummy (imported as ThreadPool)

from multiprocessing.dummy import Pool as ThreadPool
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import time
import re
from bs4 import BeautifulSoup

requests.packages.urllib3.disable_warnings(InsecureRequestWarning)


course_url = ['xxxxxxxxxxxx?page={}'.format(i) for i in range(1, 101)]

# compiled pattern (not used below)
pattern = re.compile(r'<a href="(/zuofa/\d+?)" title="(.*?)" class="shipu">')

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.81 Safari/537.36'}


def extra_links(url):
    r = requests.get(url, headers=headers)
    if r.status_code == 200:
        soup = BeautifulSoup(r.text, 'lxml')
        course_list = soup.find(class_='main-left')
        links = course_list.find_all(class_='item-tt-link')
        for i in links:
            href = i.attrs['href']
            # f is the module-level file object opened in the __main__ block below
            f.write('http:{} ---->{}\n'.format(href, i.attrs['title']))


if __name__ == '__main__':
    s = time.perf_counter()  # time.clock() was removed in Python 3.8
    with open('course_link.txt', 'w', encoding='utf8') as f:
        pool = ThreadPool(20)  # 20 worker threads behind the familiar Pool API
        pool.map(extra_links, course_url)
        pool.close()
        pool.join()
    print('Run time: %.2f seconds' % (time.perf_counter() - s))
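multiprocessing.dummy deliberately mirrors the multiprocessing.Pool API with threads behind it, so switching to real processes is a one-line import change. The caveat: separate processes each get their own copy of module state, so writing to a shared f from workers is unreliable; have workers return their lines instead and write from the parent. A minimal sketch under that assumption, reusing the extra_links_collect helper sketched under Method 2:

from multiprocessing import Pool  # real processes; same map/close/join API as ThreadPool

if __name__ == '__main__':
    with open('course_link.txt', 'w', encoding='utf8') as f:
        with Pool(4) as pool:
            # extra_links_collect returns lines; only the parent process touches f
            for lines in pool.map(extra_links_collect, course_url):
                f.writelines(lines)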

You can benchmark the speed of each version yourself; the latter two are noticeably simpler to write.