requests

The Requests module

Official documentation: http://cn.python-requests.org/zh_CN/latest/user/quickstart.html#id4

  The Python standard library provides the urllib, urllib2, and httplib modules for making HTTP requests, but their APIs are clunky. They were built for a different era and a different web, and even the simplest tasks require a large amount of work, up to and including overriding methods.

  Requests is an Apache2-licensed HTTP library written in Python. It is a high-level wrapper around the built-in modules that makes issuing network requests from Python much more pleasant; with Requests you can easily do anything a browser can do.

1. GET requests

# 1. Without parameters
  
import requests
  
ret = requests.get('https://github.com/timeline.json')
  
print(ret.url)
print(ret.text)
  
  
  
# 2. With parameters
  
import requests
  
payload = {'key1': 'value1', 'key2': 'value2'}
ret = requests.get("http://httpbin.org/get", params=payload)
  
print(ret.url)
print(ret.text)

 

2. POST requests

 

# 1. Basic POST example
  
import requests
  
payload = {'key1': 'value1', 'key2': 'value2'}
ret = requests.post("http://httpbin.org/post", data=payload)
  
print(ret.text)
  
  
# 2. Sending request headers and data
  
import requests
import json
  
url = 'https://api.github.com/some/endpoint'
payload = {'some': 'data'}
headers = {'content-type': 'application/json'}
  
ret = requests.post(url, data=json.dumps(payload), headers=headers)
  
print(ret.text)
print(ret.cookies)

 

3. Other request methods

requests.get(url, params=None, **kwargs)
requests.post(url, data=None, json=None, **kwargs)
requests.put(url, data=None, **kwargs)
requests.head(url, **kwargs)
requests.delete(url, **kwargs)
requests.patch(url, data=None, **kwargs)
requests.options(url, **kwargs)
  
# All of the methods above are built on top of this one
requests.request(method, url, **kwargs)
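All of the shortcut methods simply delegate to requests.request, so any of them can be rewritten as a direct requests.request call. A minimal sketch (using httpbin.org, which echoes requests back, as elsewhere in this article):

import requests

# equivalent to requests.get('http://httpbin.org/get', params={'k': 'v'})
ret = requests.request(method='GET', url='http://httpbin.org/get', params={'k': 'v'})
print(ret.url)          # http://httpbin.org/get?k=v
print(ret.status_code)  # 200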

 

4. More parameters

 

2.1 url

requests.post("http://httpbin.org/post", data=payload)
requests.request(method='post', url='http://127.0.0.1:8000/test/')
2.2 headers

# Send custom request headers to the server
requests.request(
    method='POST',
    url='http://127.0.0.1:8000/test/',
    json={'k1': 'v1', 'k2': '水电费'},
    # note: this explicit Content-Type overrides the application/json
    # default that json= would otherwise set
    headers={'Content-Type': 'application/x-www-form-urlencoded'}
)
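A common use of headers is setting a custom User-Agent, since many sites block the default one. A small sketch; ret.request.headers shows what was actually sent, and 'my-crawler/0.1' is just a made-up value:

import requests

ret = requests.get(
    'http://httpbin.org/get',
    headers={'User-Agent': 'my-crawler/0.1'},  # hypothetical UA string
)
# ret.request is the PreparedRequest; its headers are what went on the wire
print(ret.request.headers['User-Agent'])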
 
2.3 cookies

# Send cookies to the server
requests.request(
    method='POST',
    url='http://127.0.0.1:8000/test/',
    data={'k1': 'v1', 'k2': 'v2'},
    cookies={'cook1': 'value1'},
)

    
# A CookieJar can also be used (the dict form is a wrapper built on top of it)
from http.cookiejar import CookieJar
from http.cookiejar import Cookie

obj = CookieJar()
obj.set_cookie(Cookie(
    version=0, name='c1', value='v1', port=None, domain='', path='/',
    secure=False, expires=None, discard=True, comment=None, comment_url=None,
    rest={'HttpOnly': None}, rfc2109=False, port_specified=False,
    domain_specified=False, domain_initial_dot=False, path_specified=False,
))
requests.request(method='POST', url='http://127.0.0.1:8000/test/', data={'k1': 'v1', 'k2': 'v2'}, cookies=obj)
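Cookies flow the other way too: whatever the server sets is available on response.cookies and can be passed to the next request, which is the pattern the login examples later in this article rely on. A sketch against httpbin.org:

import requests

# /cookies/set replies with a Set-Cookie header (and a redirect, which we skip)
r1 = requests.get('http://httpbin.org/cookies/set?k1=v1', allow_redirects=False)
print(r1.cookies.get_dict())  # {'k1': 'v1'}

# forward the received cookies on the next request
r2 = requests.get('http://httpbin.org/cookies', cookies=r1.cookies)
print(r2.text)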
2.4 params

# - can be a dict
# - can be a string
# - can be bytes (containing only ASCII)
requests.request(method='get', url='http://127.0.0.1:8000/test/', params={'k1': 'v1', 'k2': '水电费'})
requests.request(method='get', url='http://127.0.0.1:8000/test/', params="k1=v1&k2=水电费&k3=v3&k3=vv3")
requests.request(method='get', url='http://127.0.0.1:8000/test/', params=bytes("k1=v1&k2=k2&k3=v3&k3=vv3", encoding='utf8'))
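Whichever form params takes, it ends up URL-encoded in the query string; ret.url shows the final URL that was requested. A quick sketch:

import requests

ret = requests.get('http://httpbin.org/get', params={'k1': 'v1', 'k2': '水电费'})
# non-ASCII values are percent-encoded automatically
print(ret.url)  # http://httpbin.org/get?k1=v1&k2=%E6%B0%B4%E7%94%B5%E8%B4%B9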
2.5 data (the request body)

requests.request(
    method='POST',
    url='http://127.0.0.1:8000/test/',
    data="k1=v1;k2=v2;k3=v3;k3=v4",
    headers={'Content-Type': 'application/x-www-form-urlencoded'}
)

requests.request(
    method='POST',
    url='http://127.0.0.1:8000/test/',
    # the file contains: k1=v1;k2=v2;k3=v3;k3=v4
    data=open('data_file.py', mode='r', encoding='utf-8'),
    headers={'Content-Type': 'application/x-www-form-urlencoded'}
)

requests.post(..., data={'user': 'root', 'pwd': '123'})
# which goes over the wire roughly as:
# POST /index HTTP/1.1\r\nHost: c1.com\r\n\r\nuser=root&pwd=123

2.6 json (the request body)

# json= serializes the given data to a string with json.dumps(...), sends it
# in the request body, and sets the Content-Type header to
# {'Content-Type': 'application/json'}
requests.request(
    method='POST', url='http://127.0.0.1:8000/test/', json={'k1': 'v1', 'k2': '水电费'}
)
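To make the data/json difference concrete, a sketch comparing the body and Content-Type each one produces (httpbin.org echoes the request back):

import requests

r1 = requests.post('http://httpbin.org/post', data={'k1': 'v1'})
print(r1.request.headers['Content-Type'])  # application/x-www-form-urlencoded
print(r1.request.body)                     # k1=v1

r2 = requests.post('http://httpbin.org/post', json={'k1': 'v1'})
print(r2.request.headers['Content-Type'])  # application/json
print(r2.request.body)                     # b'{"k1": "v1"}'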
2.7 proxies

# Without authentication
proxie_dict = {
    "http": "77.75.105.165",
    "https": "77.75.105.123",
}
ret = requests.get("http://www.baidu.com", proxies=proxie_dict)
print(ret.headers)

# With proxy authentication
from requests.auth import HTTPProxyAuth

proxyDict = {'http': '77.75.105.165', 'https': '77.75.105.165'}
auth = HTTPProxyAuth('username', 'mypassword')
r = requests.get("http://www.google.com", proxies=proxyDict, auth=auth)
print(r.text)
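Note the proxy addresses above carry no scheme or port; in practice each entry is usually written as 'scheme://host:port'. A sketch with made-up proxy addresses:

import requests

proxies = {
    'http': 'http://10.10.1.10:3128',    # hypothetical proxy
    'https': 'http://10.10.1.10:1080',   # hypothetical proxy
}
ret = requests.get('http://httpbin.org/ip', proxies=proxies)
print(ret.text)  # shows the IP address the target server saw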


2.8 files (uploading files)

# Send a file
file_dict = {
    'f1': open('xxxx.log', 'rb')
}
requests.request(method='POST', url='http://127.0.0.1:8000/test/', files=file_dict)

# Send a file with a custom file name
file_dict = {
    'f1': ('test.txt', open('readme', 'rb'))
}
requests.request(method='POST', url='http://127.0.0.1:8000/test/', files=file_dict)

# Send literal content as a file, with a custom file name
file_dict = {
    'f1': ('test.txt', "hahsfaksfa9kasdjflaksdjf")
}
requests.request(method='POST', url='http://127.0.0.1:8000/test/', files=file_dict)

# Send literal content as a file, with a custom file name, content type and extra headers
file_dict = {
    'f1': ('test.txt', "hahsfaksfa9kasdjflaksdjf", 'application/text', {'k1': '0'})
}
requests.request(method='POST', url='http://127.0.0.1:8000/test/', files=file_dict)
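files= switches the request to multipart/form-data encoding. A sketch against httpbin.org, which echoes uploaded files back under the "files" key:

import requests

file_dict = {'f1': ('test.txt', 'some file content')}
ret = requests.post('http://httpbin.org/post', files=file_dict)
print(ret.request.headers['Content-Type'])  # multipart/form-data; boundary=...
print(ret.json()['files'])                  # {'f1': 'some file content'}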


  
2.9 auth

# Internally, the user name and password are base64-encoded and placed in
# the request headers:
#     Authorization: "Basic base64('user:password')"

from requests.auth import HTTPBasicAuth, HTTPDigestAuth

ret = requests.get('https://api.github.com/user', auth=HTTPBasicAuth('qwqw', '11213'))
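For illustration, HTTPBasicAuth is equivalent to building that Authorization header by hand. A sketch against httpbin.org's /basic-auth endpoint, which returns 200 only when the credentials match the path:

import base64
import requests

user, password = 'qwqw', '11213'
token = base64.b64encode(bytes('%s:%s' % (user, password), 'utf8')).decode('utf8')
ret = requests.get(
    'http://httpbin.org/basic-auth/qwqw/11213',
    headers={'Authorization': 'Basic ' + token},
)
print(ret.status_code)  # 200 if the credentials match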
2.10 timeout

# a single number applies to both the connect and the read phase;
# a (connect, read) tuple sets the two timeouts separately
ret1 = requests.get('http://google.com/', timeout=1)
print(ret1)

ret2 = requests.get('http://google.com/', timeout=(5, 1))
print(ret2)
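When the timeout is exceeded, requests raises requests.exceptions.Timeout, which is usually worth catching. A sketch using httpbin.org's /delay endpoint:

import requests

try:
    # the server waits 5s before responding, but the read timeout is 1s
    ret = requests.get('http://httpbin.org/delay/5', timeout=(3, 1))
    print(ret.status_code)
except requests.exceptions.Timeout as e:
    print('request timed out:', e)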


2.11 allow_redirects (whether to follow redirects)

ret = requests.get("http://127.0.0.1:8000/test/", allow_redirects=False)
print(ret.text)
2.12 stream (downloading large responses)

ret = requests.get('http://127.0.0.1:8000/test/', stream=True)
print(ret.content)
ret.close()

from contextlib import closing

with closing(requests.get('http://httpbin.org/get', stream=True)) as r1:
    # process the response here
    for i in r1.iter_content():
        print(i)
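For an actual large download, iter_content is normally given a chunk_size and the chunks written straight to disk, so the whole body never sits in memory. A sketch ('out.bin' is an arbitrary file name):

from contextlib import closing
import requests

# /bytes/102400 returns 100 KB of random data
with closing(requests.get('http://httpbin.org/bytes/102400', stream=True)) as r:
    with open('out.bin', 'wb') as f:
        for chunk in r.iter_content(chunk_size=8192):
            f.write(chunk)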

2.13 cert (client certificate)

# - Sites such as Baidu or Tencent: no certificate needs to be supplied
#   (the system obtains the certificate automatically)
# - Custom certificate:
requests.get('http://127.0.0.1:8000/test/', cert="xxxx/xxx/xxx.pem")
requests.get('http://127.0.0.1:8000/test/', cert=("xxxx/xxx/xxx.pem", "xxx.xx.xx.key"))

2.14 verify

# whether to verify the server's TLS certificate; verify=False skips the check
verify = False
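A sketch: verify=False is typically used against test servers with self-signed certificates (badssl.com below is a public TLS test site); requests then emits an InsecureRequestWarning, which can be silenced explicitly:

import requests

requests.packages.urllib3.disable_warnings()  # silence InsecureRequestWarning
ret = requests.get('https://self-signed.badssl.com/', verify=False)
print(ret.status_code)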

2.15 session

import requests

session = requests.Session()

# 1. First request any page to obtain a cookie
i1 = session.get(url="http://dig.chouti.com/help/service")

# 2. Log in; the session carries the previous cookie, and the backend
#    authorizes the gpsd value inside it
i2 = session.post(
    url="http://dig.chouti.com/login",
    data={
        'phone': "8615131255089",
        'password': "xxxxxx",
        'oneMonth': ""
    }
)

i3 = session.post(
    url="http://dig.chouti.com/link/vote?linksId=8589623",
)
print(i3.text)
 
 

 

5. Summary

1. Basic crawler workflow
    a. Crawlers
        - targeted
        - non-targeted
    
    b.
        Download the page:
        
        Filter it:
            regular expressions
        ========= open-source modules =========================
        1. The requests module
            - methods
            - parameters
            - session
                session = requests.Session()
                session.get()
                session.post()
            response = requests.get('http://www.autohome.com.cn/news/')
            response.text
            
            Summary:
                response = requests.get('URL')
                response.text
                response.content
                response.encoding
                response.apparent_encoding
                response.status_code
                
        2. The BeautifulSoup module
            soup = BeautifulSoup(response.text, features='html.parser')
            target = soup.find(id='auto-channel-lazyload-article')
            
            Summary:
            find     # first match
            find_all # all matches
            
            soup = BeautifulSoup('<html>...</html>', features='html.parser')
            v1 = soup.find('div')           # first div under soup
            v1 = soup.find(id='i1')         # first tag with id=i1
            v1 = soup.find('div', id='i1')  # first div with id=i1
            
            v2 = soup.find_all('div')
            v2 = soup.find_all(id='i1')
            v2 = soup.find_all('div', id='i1')
            
            obj = v1
            obj = v2[0]
            obj.text  # the tag's text
            obj.attrs # the tag's attributes
        
        Logging in:
            page reloads:         form submission
            page does not reload: Ajax submission
        Exercise 2:
        
            Log in to GitHub automatically from a program
            requests
        Exercise 3: crawl GitHub
            - send request headers
            - send cookies
            - request body:
                commit: Sign in
                utf8: ✓
                authenticity_token: iWlPKAsJ9nQNDaqC47P27GWx37a08iBv/0io8C4QPUluL1JxyWJSt0ZlgBBWv3BeFJ4ywbR5dKWzSqwzhILH6Q==
                login: Yun-Wangj
                password: yun258762
        Exercise 4: log in to Lagou
            - the password is encrypted
                - find the JS and reimplement the encryption in Python
                - or capture the ciphertext and replay it as-is
            - the Referer header carries the previous request's URL and can be used for anti-leech checks
        
        Summary:
            Request headers:
                user-agent
                referer
                host
                cookie
                
                special request headers whose values come from the previous response, e.g. on Lagou
            Request body:
                - raw data
                - raw data + token
                - ciphertext
                    - find the algorithm
                    - or reuse the ciphertext
                    
            Two common patterns:
                - POST the login to get a cookie, then carry that cookie on later requests
                - GET an unauthorized cookie first, POST the login carrying it so it gets authorized, then carry that cookie on later requests

 

 

 

The BeautifulSoup module

  BeautifulSoup is a module that accepts an HTML or XML string and parses it, after which you can use the methods it provides to quickly locate specific elements, making element lookup in HTML or XML documents simple.

 

from bs4 import BeautifulSoup
 
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
asdf
    <div class="title">
        <b>The Dormouse's story总共</b>
        <h1>f</h1>
    </div>
<div class="story">Once upon a time there were three little sisters; and their names were
    <a  class="sister0" id="link1">Els<span>f</span>ie</a>,
    <a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
    <a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</div>
ad<br/>sf
<p class="story">...</p>
</body>
</html>
"""
 
soup = BeautifulSoup(html_doc, features="lxml")
# find the first a tag
tag1 = soup.find(name='a')
# find all a tags
tag2 = soup.find_all(name='a')
# find tags with id=link2
tag3 = soup.select('#link2')

 

Installation:

pip3 install beautifulsoup4

 

Usage example:

from bs4 import BeautifulSoup
 
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
    ...
</body>
</html>
"""
 
soup = BeautifulSoup(html_doc, features="lxml")

 

  1. name, the tag name

# tag = soup.find('a')
# name = tag.name # get
# print(name)
# tag.name = 'span' # set
# print(soup)

 

  2. attrs, the tag's attributes

# tag = soup.find('a')
# attrs = tag.attrs    # get
# print(attrs)
# tag.attrs = {'ik':123} # set
# tag.attrs['id'] = 'iiiii' # set
# print(soup)

 

  3. children, all direct child nodes

# body = soup.find('body')
# v = body.children

 

  4. descendants, all descendants (children, grandchildren, and so on)

# body = soup.find('body')
# v = body.descendants

 

  5. clear, empty out everything inside the tag (the tag itself is kept)

# tag = soup.find('body')
# tag.clear()
# print(soup)

 

  6. decompose, recursively remove the tag and everything inside it

# body = soup.find('body')
# body.decompose()
# print(soup)

 

  7. extract, recursively remove the tag and return the removed tag

# body = soup.find('body')
# v = body.extract()
# print(soup)

 

  8. decode, serialize to a string (including the current tag); decode_contents (excluding the current tag)

# body = soup.find('body')
# v = body.decode()
# v = body.decode_contents()
# print(v)

 

   9. encode, serialize to bytes (including the current tag); encode_contents (excluding the current tag)

# body = soup.find('body')
# v = body.encode()
# v = body.encode_contents()
# print(v)

 

 

  10. find, get the first matching tag

# tag = soup.find('a')
# print(tag)
# tag = soup.find(name='a', attrs={'class': 'sister'}, recursive=True, text='Lacie')
# tag = soup.find(name='a', class_='sister', recursive=True, text='Lacie')
# print(tag)

 

 

  11. find_all, get all matching tags

# tags = soup.find_all('a')
# print(tags)
 
# tags = soup.find_all('a',limit=1)
# print(tags)
 
# tags = soup.find_all(name='a', attrs={'class': 'sister'}, recursive=True, text='Lacie')
# # tags = soup.find(name='a', class_='sister', recursive=True, text='Lacie')
# print(tags)
 
 
# ####### lists #######
# v = soup.find_all(name=['a','div'])
# print(v)
 
# v = soup.find_all(class_=['sister0', 'sister'])
# print(v)
 
# v = soup.find_all(text=['Tillie'])
# print(v, type(v[0]))
 
 
# v = soup.find_all(id=['link1','link2'])
# print(v)
 
# v = soup.find_all(href=['link1','link2'])
# print(v)
 
# ####### regular expressions #######
import re
# rep = re.compile('p')
# rep = re.compile('^p')
# v = soup.find_all(name=rep)
# print(v)
 
# rep = re.compile('sister.*')
# v = soup.find_all(class_=rep)
# print(v)
 
# rep = re.compile('http://www.oldboy.com/static/.*')
# v = soup.find_all(href=rep)
# print(v)
 
# ####### filter functions #######
# def func(tag):
#     return tag.has_attr('class') and tag.has_attr('id')
# v = soup.find_all(name=func)
# print(v)
 
 
# ## get, read a tag attribute
# tag = soup.find('a')
# v = tag.get('id')
# print(v)

 

 

  12. has_attr, check whether the tag has a given attribute

# tag = soup.find('a')
# v = tag.has_attr('id')
# print(v)

 

 

  13. get_text, get the text inside the tag

# tag = soup.find('a')
# v = tag.get_text()
# print(v)

 

 

  14. index, the position of a child within its parent tag

# tag = soup.find('body')
# v = tag.index(tag.find('div'))
# print(v)
 
# tag = soup.find('body')
# for i,v in enumerate(tag):
# print(i,v)

 

 

  15. is_empty_element, whether the tag is an empty (self-closing) element,

i.e. one of: 'br', 'hr', 'input', 'img', 'meta', 'spacer', 'link', 'frame', 'base'

# tag = soup.find('br')
# v = tag.is_empty_element
# print(v)

 

 

  16. Navigating to related tags

# soup.next
# soup.next_element
# soup.next_elements
# soup.next_sibling
# soup.next_siblings
 
#
# tag.previous
# tag.previous_element
# tag.previous_elements
# tag.previous_sibling
# tag.previous_siblings
 
#
# tag.parent
# tag.parents

 

 

  17. Searching for related tags

# tag.find_next(...)
# tag.find_all_next(...)
# tag.find_next_sibling(...)
# tag.find_next_siblings(...)
 
# tag.find_previous(...)
# tag.find_all_previous(...)
# tag.find_previous_sibling(...)
# tag.find_previous_siblings(...)
 
# tag.find_parent(...)
# tag.find_parents(...)
 
# same parameters as find_all

 

 

  18. select, select_one: CSS selectors

soup.select("title")
 
soup.select("p nth-of-type(3)")
 
soup.select("body a")
 
soup.select("html head title")
 
tag = soup.select("span,a")
 
soup.select("head > title")
 
soup.select("p > a")
 
soup.select("p > a:nth-of-type(2)")
 
soup.select("p > #link1")
 
soup.select("body > a")
 
soup.select("#link1 ~ .sister")
 
soup.select("#link1 + .sister")
 
soup.select(".sister")
 
soup.select("[class~=sister]")
 
soup.select("#link1")
 
soup.select("a#link2")
 
soup.select('a[href]')
 
soup.select('a[href="http://example.com/elsie"]')
 
soup.select('a[href^="http://example.com/"]')
 
soup.select('a[href$="tillie"]')
 
soup.select('a[href*=".com/el"]')
 
 
from bs4.element import Tag
 
def default_candidate_generator(tag):
    for child in tag.descendants:
        if not isinstance(child, Tag):
            continue
        if not child.has_attr('href'):
            continue
        yield child
 
tags = soup.find('body').select("a", _candidate_generator=default_candidate_generator)
print(type(tags), tags)
 
from bs4.element import Tag
def default_candidate_generator(tag):
    for child in tag.descendants:
        if not isinstance(child, Tag):
            continue
        if not child.has_attr('href'):
            continue
        yield child
 
tags = soup.find('body').select("a", _candidate_generator=default_candidate_generator, limit=1)
print(type(tags), tags)

 

 

  19. Tag content

# tag = soup.find('span')
# print(tag.string)          # get
# tag.string = 'new content' # set
# print(soup)
 
# tag = soup.find('body')
# print(tag.string)
# tag.string = 'xxx'
# print(soup)
 
# tag = soup.find('body')
# v = tag.stripped_strings  # recursively get the text of all inner tags
# print(v)

 

 

  20. append, append a tag inside the current tag

# tag = soup.find('body')
# tag.append(soup.find('a'))
# print(soup)
#
# from bs4.element import Tag
# obj = Tag(name='i',attrs={'id': 'it'})
# obj.string = '我是一个新来的'
# tag = soup.find('body')
# tag.append(obj)
# print(soup)

 

 

  21. insert, insert a tag at a given position inside the current tag

# from bs4.element import Tag
# obj = Tag(name='i', attrs={'id': 'it'})
# obj.string = '我是一个新来的'
# tag = soup.find('body')
# tag.insert(2, obj)
# print(soup)

 

 

  22. insert_after, insert_before: insert after or before the current tag

# from bs4.element import Tag
# obj = Tag(name='i', attrs={'id': 'it'})
# obj.string = '我是一个新来的'
# tag = soup.find('body')
# # tag.insert_before(obj)
# tag.insert_after(obj)
# print(soup)

 

 

  23. replace_with, replace the current tag with the given tag

# from bs4.element import Tag
# obj = Tag(name='i', attrs={'id': 'it'})
# obj.string = '我是一个新来的'
# tag = soup.find('div')
# tag.replace_with(obj)
# print(soup)

 

 

  24. Creating relationships between tags

# tag = soup.find('div')
# a = soup.find('a')
# tag.setup(previous_sibling=a)
# print(tag.previous_sibling)

 

 

  25. wrap, wrap the current tag in the given tag

# from bs4.element import Tag
# obj1 = Tag(name='div', attrs={'id': 'it'})
# obj1.string = '我是一个新来的'
#
# tag = soup.find('a')
# v = tag.wrap(obj1)
# print(soup)
 
# tag = soup.find('a')
# v = tag.wrap(soup.find('p'))
# print(soup)

 

 

  26. unwrap, remove the current tag, keeping what it wrapped

# tag = soup.find('a')
# v = tag.unwrap()
# print(soup)

More parameters in the official docs: http://beautifulsoup.readthedocs.io/zh_CN/v4.4.0/

Auto-login examples:

  Chouti (dig.chouti.com)

 

import requests


# ############## Approach 1 ##############
"""
# ## 1. First request any page to obtain a cookie
i1 = requests.get(url="http://dig.chouti.com/help/service")
i1_cookies = i1.cookies.get_dict()

# ## 2. Log in, carrying the cookie from step 1; the backend authorizes the gpsd value inside that cookie
i2 = requests.post(
    url="http://dig.chouti.com/login",
    data={
        'phone': "86178#########",
        'password': "xxooxxoo",
        'oneMonth': ""
    },
    cookies=i1_cookies
)

# ## 3. Upvote (only the already-authorized gpsd needs to be sent)
gpsd = i1_cookies['gpsd']
i3 = requests.post(
    url="http://dig.chouti.com/link/vote?linksId=8589523",
    cookies={'gpsd': gpsd}
)

print(i3.text)
"""


# ############## Approach 2 ##############
"""
import requests

session = requests.Session()
i1 = session.get(url="http://dig.chouti.com/help/service")
i2 = session.post(
    url="http://dig.chouti.com/login",
    data={
        'phone': "8615131255089",
        'password': "xxooxxoo",
        'oneMonth': ""
    }
)
i3 = session.post(
    url="http://dig.chouti.com/link/vote?linksId=8589523"
)
print(i3.text)

"""

 

 

 

  

  GitHub

 

import requests
from bs4 import BeautifulSoup

# ############## Approach 1 ##############
#
# # 1. Visit the login page and grab authenticity_token
# i1 = requests.get('https://github.com/login')
# soup1 = BeautifulSoup(i1.text, features='lxml')
# tag = soup1.find(name='input', attrs={'name': 'authenticity_token'})
# authenticity_token = tag.get('value')
# c1 = i1.cookies.get_dict()
# i1.close()
#
# # 2. Send the user name, password and authenticity_token to authenticate
# form_data = {
# "authenticity_token": authenticity_token,
#     "utf8": "",
#     "commit": "Sign in",
#     "login": "XXXXX@163.com",
#     'password': 'xxoo'
# }
#
# i2 = requests.post('https://github.com/session', data=form_data, cookies=c1)
# c2 = i2.cookies.get_dict()
# c1.update(c2)
# i3 = requests.get('https://github.com/settings/repositories', cookies=c1)
#
# soup3 = BeautifulSoup(i3.text, features='lxml')
# list_group = soup3.find(name='div', class_='listgroup')
#
# from bs4.element import Tag
#
# for child in list_group.children:
#     if isinstance(child, Tag):
#         project_tag = child.find(name='a', class_='mr-1')
#         size_tag = child.find(name='small')
#         temp = "项目:%s(%s); 项目路径:%s" % (project_tag.get('href'), size_tag.string, project_tag.string, )
#         print(temp)



# ############## Approach 2 ##############
# session = requests.Session()
# # 1. Visit the login page and grab authenticity_token
# i1 = session.get('https://github.com/login')
# soup1 = BeautifulSoup(i1.text, features='lxml')
# tag = soup1.find(name='input', attrs={'name': 'authenticity_token'})
# authenticity_token = tag.get('value')
# c1 = i1.cookies.get_dict()
# i1.close()
#
# # 2. Send the user name, password and authenticity_token to authenticate
# form_data = {
#     "authenticity_token": authenticity_token,
#     "utf8": "",
#     "commit": "Sign in",
#     "login": "XXXXX@163.com",
#     'password': 'xxoo'
# }
#
# i2 = session.post('https://github.com/session', data=form_data)
# c2 = i2.cookies.get_dict()
# c1.update(c2)
# i3 = session.get('https://github.com/settings/repositories')
#
# soup3 = BeautifulSoup(i3.text, features='lxml')
# list_group = soup3.find(name='div', class_='listgroup')
#
# from bs4.element import Tag
#
# for child in list_group.children:
#     if isinstance(child, Tag):
#         project_tag = child.find(name='a', class_='mr-1')
#         size_tag = child.find(name='small')
#         temp = "项目:%s(%s); 项目路径:%s" % (project_tag.get('href'), size_tag.string, project_tag.string, )
#         print(temp)

 

 

 

  Zhihu

 

import time

import requests
from bs4 import BeautifulSoup

session = requests.Session()

i1 = session.get(
    url='https://www.zhihu.com/#signin',
    headers={
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
    }
)

soup1 = BeautifulSoup(i1.text, 'lxml')
xsrf_tag = soup1.find(name='input', attrs={'name': '_xsrf'})
xsrf = xsrf_tag.get('value')

current_time = time.time()
i2 = session.get(
    url='https://www.zhihu.com/captcha.gif',
    params={'r': current_time, 'type': 'login'},
    headers={
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
    })

with open('zhihu.gif', 'wb') as f:
    f.write(i2.content)

captcha = input('Open zhihu.gif, then enter the captcha shown: ')
form_data = {
    "_xsrf": xsrf,
    'password': 'xxooxxoo',
    "captcha": 'captcha',
    'email': '##########@163.com'
}
i3 = session.post(
    url='https://www.zhihu.com/login/email',
    data=form_data,
    headers={
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
    }
)

i4 = session.get(
    url='https://www.zhihu.com/settings/profile',
    headers={
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
    }
)

soup4 = BeautifulSoup(i4.text, 'lxml')
tag = soup4.find(id='rename-section')
nick_name = tag.find('span',class_='name').string
print(nick_name)

 

 

 

 

  Lagou:

 

import re
import requests


# Step 1: fetch the login page to obtain X_Anti_Forge_Token and X_Anti_Forge_Code
# 1. Request URL: https://passport.lagou.com/login/login.html
# 2. Method: GET
# 3. Request headers:
#    User-Agent
r1 = requests.get('https://passport.lagou.com/login/login.html',
                 headers={
                     'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
                 },
                 )

X_Anti_Forge_Token = re.findall("X_Anti_Forge_Token = '(.*?)'", r1.text, re.S)[0]
X_Anti_Forge_Code = re.findall("X_Anti_Forge_Code = '(.*?)'", r1.text, re.S)[0]
print(X_Anti_Forge_Token, X_Anti_Forge_Code)
# print(r1.cookies.get_dict())
# Step 2: log in
# 1. Request URL: https://passport.lagou.com/login/login.json
# 2. Method: POST
# 3. Request headers:
#    Cookie
#    User-Agent
#    Referer: https://passport.lagou.com/login/login.html
#    X-Anit-Forge-Code: 53165984
#    X-Anit-Forge-Token: 3b6a2f62-80f0-428b-8efb-ef72fc100d78
#    X-Requested-With: XMLHttpRequest
# 4. Request body:
#    isValidate: true
#    username: 17821281271
#    password: ab18d270d7126ea65915c50288c22c0d
#    request_form_verifyCode: ''
#    submit: ''
r2 = requests.post(
    'https://passport.lagou.com/login/login.json',
    headers={
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
        'Referer': 'https://passport.lagou.com/login/login.html',
        'X-Anit-Forge-Code': X_Anti_Forge_Code,
        'X-Anit-Forge-Token': X_Anti_Forge_Token,
        'X-Requested-With': 'XMLHttpRequest'
    },
    data={
        "isValidate": True,
        'username': '17821281271',
        'password': 'ab18d270d7126ea65915c50288c22c0d',
        'request_form_verifyCode': '',
        'submit': ''
    },
    cookies=r1.cookies.get_dict()
)
print(r2.text)

 

 

 

 

   Cnblogs

 

import re
import json
import base64

import rsa
import requests


def js_encrypt(text):
    b64der = 'MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCp0wHYbg/NOPO3nzMD3dndwS0MccuMeXCHgVlGOoYyFwLdS24Im2e7YyhB0wrUsyYf0/nhzCzBK8ZC9eCWqd0aHbdgOQT6CuFQBMjbyGYvlVYU2ZP7kG9Ft6YV6oc9ambuO7nPZh+bvXH0zDKfi02prknrScAKC0XhadTHT3Al0QIDAQAB'
    der = base64.standard_b64decode(b64der)

    pk = rsa.PublicKey.load_pkcs1_openssl_der(der)
    v1 = rsa.encrypt(bytes(text, 'utf8'), pk)
    value = base64.encodebytes(v1).replace(b'\n', b'')
    value = value.decode('utf8')

    return value


session = requests.Session()

i1 = session.get('https://passport.cnblogs.com/user/signin')
rep = re.compile("'VerificationToken': '(.*)'")
v = re.search(rep, i1.text)
verification_token = v.group(1)

form_data = {
    'input1': js_encrypt('wptawy'),
    'input2': js_encrypt('asdfasdf'),
    'remember': False
}

i2 = session.post(url='https://passport.cnblogs.com/user/signin',
                  data=json.dumps(form_data),
                  headers={
                      'Content-Type': 'application/json; charset=UTF-8',
                      'X-Requested-With': 'XMLHttpRequest',
                      'VerificationToken': verification_token}
                  )

i3 = session.get(url='https://i.cnblogs.com/EditDiary.aspx')

print(i3.text)