References:

http://blog.csdn.net/wklken/article/details/7364390

http://hankjin.blog.163.com/blog/static/3373193720105140583594/

1. Basic usage

res = urllib2.urlopen(url)
print res.read()
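
The return value is a file-like response object; besides read(), it also exposes the status code, the final URL after redirects, and the response headers:

res = urllib2.urlopen(url)
print res.getcode()   # HTTP status code
print res.geturl()    # final URL, after any redirects
print res.info()      # response headers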

2. Attaching GET or POST data

Passing a data argument makes urllib2 issue a POST; for a GET, append the URL-encoded parameters to the URL instead (see the sketch below).

data = {"name": "hank", "passwd": "hjz"}
urllib2.urlopen(url, urllib.urlencode(data))
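
A minimal GET sketch with the same parameters sent in the query string:

query = urllib.urlencode({"name": "hank", "passwd": "hjz"})
print urllib2.urlopen(url + "?" + query).read()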

3. Adding HTTP headers

Custom headers have to go through a Request object; urlopen itself takes no headers argument (its third positional parameter is the timeout).

header = {"User-Agent": "Mozilla-Firefox5.0"}
req = urllib2.Request(url, urllib.urlencode(data), header)
urllib2.urlopen(req)
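
Headers can also be added after the Request is created; when a key repeats, the later value wins:

req = urllib2.Request(url)
req.add_header("Accept", "application/json")
print urllib2.urlopen(req).read()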

4. Adding a session (cookies)

cj = cookielib.CookieJar()
cjhandler = urllib2.HTTPCookieProcessor(cj)
opener = urllib2.build_opener(cjhandler)
urllib2.install_opener(opener)
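
Once the opener is installed, every subsequent urlopen call shares the same cookie jar, so a login followed by another request keeps the session. A sketch, assuming hypothetical login_url and profile_url endpoints:

urllib2.urlopen(login_url, urllib.urlencode({"name": "hank", "passwd": "hjz"}))  # server sets a session cookie in cj
print urllib2.urlopen(profile_url).read()  # the stored cookie is sent back automatically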

5. Adding Basic authentication

username, password = "user", "secret"  # placeholder credentials
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
top_level_url = "http://www.163.com/"
password_mgr.add_password(None, top_level_url, username, password)
handler = urllib2.HTTPBasicAuthHandler(password_mgr)
opener = urllib2.build_opener(handler)
urllib2.install_opener(opener)
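
With the opener installed, any request under top_level_url that comes back with a 401 challenge is retried automatically with these credentials, e.g. (hypothetical protected path):

print urllib2.urlopen("http://www.163.com/protected/").read()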

6. Using a proxy

proxy_support = urllib2.ProxyHandler({"http": "http://1.2.3.4:3128/"})
opener = urllib2.build_opener(proxy_support)
urllib2.install_opener(opener)
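
If the proxy should not apply globally, skip install_opener and call the opener directly:

content = opener.open(url).read()  # only requests made through this opener use the proxy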

7. Setting a timeout

socket.setdefaulttimeout(5)

or, on Python 2.6+:

urllib2.urlopen(url, timeout=5)
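
A timed-out or otherwise failed request can be caught like this (a minimal sketch):

import socket, urllib2
try:
    print urllib2.urlopen(url, timeout=5).read()
except urllib2.URLError, e:
    print "request failed:", e.reason
except socket.timeout:
    print "read timed out"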

A longer reference script:

#!/usr/bin/python
# -*- coding:utf-8 -*-
# urllib2_test.py
# author: wklken
# 2012-03-17 wklken@yeah.net

import urllib, urllib2, cookielib, socket

url = "http://www.testurl....."  # change to your own URL

# simplest form
def use_urllib2():
    try:
        f = urllib2.urlopen(url, timeout=5).read()
        print len(f)
    except urllib2.URLError, e:
        print e.reason

# using Request
def get_request():
    # a timeout can be set
    socket.setdefaulttimeout(5)
    # parameters can be attached [no data: GET; with data, as below: POST]
    params = {"wd": "a", "b": ""}
    # request headers can be added so the client identifies itself
    i_headers = {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.1) Gecko/20090624 Firefox/3.5",
                 "Accept": "text/plain"}
    # use POST: send params to the server; if the server does not accept them, an exception is thrown
    #req = urllib2.Request(url, data=urllib.urlencode(params), headers=i_headers)
    req = urllib2.Request(url, headers=i_headers)
    # after the Request is created, more headers can be added; if a key repeats, the later value wins
    #req.add_header('Accept', 'application/json')
    # the HTTP method can also be overridden
    #req.get_method = lambda: 'PUT'
    try:
        page = urllib2.urlopen(req)
        print len(page.read())
        # GET-style equivalent
        #url_params = urllib.urlencode({"a": "1", "b": "2"})
        #final_url = url + "?" + url_params
        #print final_url
        #data = urllib2.urlopen(final_url).read()
        #print "Method:get ", len(data)
    except urllib2.HTTPError, e:
        print "Error Code:", e.code
    except urllib2.URLError, e:
        print "Error Reason:", e.reason

def use_proxy():
    enable_proxy = False
    proxy_handler = urllib2.ProxyHandler({"http": "http://proxyurlXXXX.com:8080"})
    null_proxy_handler = urllib2.ProxyHandler({})
    if enable_proxy:
        opener = urllib2.build_opener(proxy_handler, urllib2.HTTPHandler)
    else:
        opener = urllib2.build_opener(null_proxy_handler, urllib2.HTTPHandler)
    # this line installs the opener globally for urllib2
    urllib2.install_opener(opener)
    content = urllib2.urlopen(url).read()
    print "proxy len:", len(content)

# cookie processor that returns the response for these errors instead of raising
class NoExceptionCookieProcesser(urllib2.HTTPCookieProcessor):
    def http_error_403(self, req, fp, code, msg, hdrs):
        return fp
    def http_error_400(self, req, fp, code, msg, hdrs):
        return fp
    def http_error_500(self, req, fp, code, msg, hdrs):
        return fp

def hand_cookie():
    cookie = cookielib.CookieJar()
    #cookie_handler = urllib2.HTTPCookieProcessor(cookie)
    # the same handler, but with the error handling above added
    cookie_handler = NoExceptionCookieProcesser(cookie)
    opener = urllib2.build_opener(cookie_handler, urllib2.HTTPHandler)
    url_login = "https://www.yourwebsite/?login"
    params = {"username": "user", "password": ""}
    opener.open(url_login, urllib.urlencode(params))
    for item in cookie:
        print item.name, item.value
    #urllib2.install_opener(opener)
    #content = urllib2.urlopen(url).read()
    #print len(content)

# get the final URL of the page after N redirects
def get_request_direct():
    import httplib
    httplib.HTTPConnection.debuglevel = 1
    request = urllib2.Request("http://www.google.com")
    request.add_header("Accept", "text/html,*/*")
    request.add_header("Connection", "Keep-Alive")
    opener = urllib2.build_opener()
    f = opener.open(request)
    print f.url
    print f.headers.dict
    print len(f.read())

if __name__ == "__main__":
    use_urllib2()
    get_request()
    get_request_direct()
    use_proxy()
    hand_cookie()

A small example for testing a WSGI interface:

url = 'http://192.168.33.11:9008/getActivityInfo'
data = '''{"userid":"123","type":"1","flag":"t32"}'''
# urllib.urlencode would raise an error here: data is already a JSON string, not a dict
# response = urllib2.urlopen(url, urllib.urlencode(data))
response = urllib2.urlopen(url, data)
print response.getcode()
print response.read()
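
If the server checks the Content-Type, pass the JSON body through a Request with an explicit header (a sketch, reusing the url and data above):

req = urllib2.Request(url, data, {"Content-Type": "application/json"})
response = urllib2.urlopen(req)
print response.getcode()
print response.read()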
