Size: 7100
Comment:
|
Size: 7096
Comment:
|
Deletions are marked like this. | Additions are marked like this. |
Line 3: | Line 3: |
代码见 http://zspy.googlecode.com
张沈鹏 [email protected] http://zsp.javaeye.com/
2008-1-23 16:42
1. PycURL
Pycurl http://pycurl.sourceforge.net/
外部libcurl的接口,C写的,比urllib快,功能强.支持限制循环redirect陷阱的安全深度. 用于做网络爬虫,抓网页.
从 http://pycurl.sourceforge.net/download/ 下载 pycurl-ssl-7.16.4.win32-py2.5.exe 安装.
参考文献1,测试代码
1 #像操作文件一样操作字符串,也可以from cStringIO import StringIO,性能应该会好一些
2 import StringIO
3
4 html = StringIO.StringIO()
5
6 import pycurl
7 c = pycurl.Curl()
8
9 c.setopt(pycurl.URL, 'http://www.baidu.com')
10
11 #写的回调
12 c.setopt(pycurl.WRITEFUNCTION, html.write)
13
14 c.setopt(pycurl.FOLLOWLOCATION, 1)
15
16 #最大重定向次数,可以预防重定向陷阱
17 c.setopt(pycurl.MAXREDIRS, 5)
18
19 #访问,阻塞到访问结束
20 c.perform()
21
22 #打印出 200(HTTP状态码) http://www.baidu.com(生效的url)
23 print c.getinfo(pycurl.HTTP_CODE), c.getinfo(pycurl.EFFECTIVE_URL)
24
25 #输出百度首页的html
26 #print html.getvalue()
然后看看多线程,http://pycurl.cvs.sourceforge.net/pycurl/pycurl/tests/ 有很多例子,还可做参考http://pycurl.sourceforge.net/doc/curlmultiobject.html
我自己改写了一个:)
1 #!/usr/bin/env python
2 #coding=utf-8
3
4 import threading
5 import pycurl
6 from cStringIO import StringIO
7
8 class UrlOpen(threading.Thread):
9 """异步下载网页"""
10
11 def __init__(self):
12 super(UrlOpen,self).__init__()
13 self.opener = pycurl.CurlMulti()
14 self.handle_list=[]
15
16 def add(self,url,recall,writer=StringIO()):
17 """
18 参数:网址,回调函数,存放临时数据的对象
19 """
20 c = pycurl.Curl()
21
22 #可以传给回调函数
23 c.url=url
24 c.content = writer
25 c.recall = recall
26 c.setopt(c.URL,url)
27 c.setopt(c.WRITEFUNCTION,c.content.write)
28
29 self.handle_list.append(c)
30 self.opener.add_handle(c)
31
32 def _remove(self,c):
33 c.close()
34 self.opener.remove_handle(c)
35 self.handle_list.remove(c)
36
37
38 def run(self):
39 num_handle=len(self.handle_list)
40 while 1:
41 ret = self.opener.select(10.0)
42 if ret == -1: continue
43 while 1:
44 num_handle_pre=num_handle
45 ret, num_handle =self.opener.perform()
46 #活动的连接数改变时
47 if num_handle!=num_handle_pre:
48 result=self.opener.info_read()
49 print result
50 for i in result[1]:
51 #成功
52 i.http_code = i.getinfo(i.HTTP_CODE)
53 self._remove(i)
54 i.recall(i)
55 for i in result[2]:
56 #失败,应该记录一下
57 self._remove(i)
58
59 if ret != pycurl.E_CALL_MULTI_PERFORM:
60 break
61
62 _opener=None
63 def urlopen(*arg,**key):
64 global _opener
65 if _opener is None:
66 _opener=UrlOpen()
67 _opener.add(*arg,**key)
68 _opener.start()
69 else:
70 _opener.add(*arg,**key)
71
72 def show(x):
73 print x.content.getvalue()
74 if __name__=="__main__":
75 urlopen("http://www.baidu.com/",show)
76 _opener.join()
又封装了一个异步打开网页的类和函数
1 #coding=utf-8
2
3 import threading
4 from cStringIO import StringIO
5
6 import pycurl
7 """
8 Asyn open url
9 Author:[email protected]
10 2008-1-25 17:14
11 """
12
class UrlOpen(threading.Thread):
    """Thread that downloads pages asynchronously via a pycurl CurlMulti."""

    def __init__(self):
        super(UrlOpen, self).__init__()
        self.opener = pycurl.CurlMulti()
        self.handle_list = []
        # Requests queued by add() but not yet attached to the multi
        # handle. Guarded by _lock: add() runs on the caller's thread
        # while _add() runs on this thread, and the original unlocked
        # copy-then-clear swap could silently drop requests queued
        # between the two statements.
        self.waiting = []
        self._lock = threading.Lock()

    def add(self, url, recall, catch=None, writer=None):
        """
        Queue a download.

        url    -- address to fetch (unicode is encoded as UTF-8)
        recall -- callback invoked with the finished Curl object
        catch  -- optional error callback (curl, error_no, description);
                  by default errors are silently dropped (deliberate
                  best-effort behavior)
        writer -- optional file-like sink for the body. Defaults to a
                  fresh StringIO per call: a mutable default argument
                  (StringIO() in the signature) would be shared across
                  calls and interleave the downloaded pages.
        """
        if catch is None:
            def catch(curl, error_no, desp):
                #print "Error:%s - %s"%(error_no,desp)
                pass
        if writer is None:
            writer = StringIO()

        c = pycurl.Curl()

        # Extra attributes travel with the handle to the callbacks.
        c.url = url
        c.content = writer
        c.recall = recall
        c.catch = catch
        c.setopt(c.URL,
            url.encode('utf-8') if type(url) is unicode else url
            )
        c.setopt(c.WRITEFUNCTION, c.content.write)

        self._lock.acquire()
        try:
            self.waiting.append(c)
        finally:
            self._lock.release()

    def _add(self):
        # Atomically take ownership of the queued handles so a
        # concurrent add() can never race with the swap.
        self._lock.acquire()
        try:
            waiting = self.waiting
            self.waiting = []
        finally:
            self._lock.release()
        for c in waiting:
            self.handle_list.append(c)
            self.opener.add_handle(c)

    def _remove(self, c):
        # Detach from the multi handle *before* closing: libcurl
        # requires curl_multi_remove_handle() before curl_easy_cleanup().
        self.opener.remove_handle(c)
        c.close()
        self.handle_list.remove(c)

    def run(self):
        import time
        num_handle = 0
        while 1:
            if self.handle_list:
                ret = self.opener.select(1.0)
                if ret >= 0:
                    while 1:
                        num_handle_pre = num_handle
                        ret, num_handle = self.opener.perform()
                        # Harvest finished transfers when the count of
                        # live handles changes.
                        if num_handle != num_handle_pre:
                            result = self.opener.info_read()
                            for i in result[1]:
                                # Success: record HTTP status, detach,
                                # then invoke the user callback.
                                i.http_code = i.getinfo(i.HTTP_CODE)
                                self._remove(i)
                                i.recall(i)
                            for i in result[2]:
                                # Failure: i is a tuple such as
                                # (<pycurl.Curl object>, 6, 'Could not resolve host: ...')
                                i[0].catch(*i)
                                self._remove(i[0])
                        if ret != pycurl.E_CALL_MULTI_PERFORM:
                            break
            else:
                # Nothing in flight; avoid a busy loop.
                time.sleep(1)
            self._add()
87
88 _opener=None
89 def urlopen(*arg,**key):
90 global _opener
91 if _opener is None:
92 _opener=UrlOpen()
93 _opener.start()
94 _opener.add(*arg,**key)
95
96 if __name__=="__main__":
97 def show(x):
98 print x.content.getvalue()
99 print '--'*11
100 urlopen("http://www.baidu.com/",show)
101 urlopen("http://www.google.com/",show)
102 urlopen("http://www.sougou.com/",show)
103 urlopen("http://www.yodao.com/",show)
104 urlopen("http://www.yahoo.com/",show)
105 urlopen("http://www.msn.com/",show)
106 _opener.join()
1.1. 相关文献
PycURL简单学习 http://blog.donews.com/limodou/archive/2005/11/28/641257.aspx
python中的pycurl模块学习 https://forum.eviloctal.com/read.php?tid=27337