pip3 install requests
pip3 install beautifulsoup4
import requests
from bs4 import BeautifulSoup
import uuid

response = requests.get(url="https://www.autohome.com.cn/news/")
response.encoding = response.apparent_encoding  # detect the page's original encoding so the text decodes correctly
soup = BeautifulSoup(response.text, 'html.parser')  # using the built-in html parser; the lxml parser is usually the better choice
target = soup.find(id="auto-channel-lazyload-article")  # find() matches by attribute: id, attrs, tag name, custom attributes...
li_list = target.find_all('li')  # returned as a list
for li in li_list:
    a_tag = li.find('a')
    if a_tag:
        href = a_tag.attrs.get("href")  # attrs is a dict; use get() to read one attribute
        title = a_tag.find("h3").text   # find() returns a tag object; use .text for its text
        img_src = "http:" + a_tag.find("img").attrs.get('src')
        print(href)
        print(title)
        print(img_src)
        img_response = requests.get(url=img_src)
        file_name = str(uuid.uuid4()) + '.jpg'  # generate a unique image file name
        with open(file_name, 'wb') as fp:
            fp.write(img_response.content)
response = requests.get(url)   # get a Response object for the URL
response.apparent_encoding     # the encoding detected from the body
response.encoding              # set the encoding used to decode the body
response.text                  # body as str
response.content               # body as bytes
response.status_code           # HTTP status code
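A minimal sketch tying these attributes together (same news page as above; the timeout value is my own addition):

import requests

response = requests.get("https://www.autohome.com.cn/news/", timeout=10)
if response.status_code == 200:
    response.encoding = response.apparent_encoding  # align decoding with the detected encoding
    print(len(response.text))     # decoded str
    print(len(response.content))  # raw bytes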
soup = BeautifulSoup('page source', 'html.parser')       # build the document object
target = soup.find(id="auto-channel-lazyload-article")   # match by (custom) attribute; returns the first hit
li_list = target.find_all('li')                          # match by tag name; returns every hit as a list

Note: any custom attribute can be searched on:
v1 = soup.find('div')
v1 = soup.find(id='i1')
v1 = soup.find('div', id='i1')
find_all works the same way.

On any tag object you get back, use obj.text for its text and obj.attrs for its attribute dict.
def get(url, params=None, **kwargs):
    r"""Sends a GET request.

    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """

    kwargs.setdefault('allow_redirects', True)
    return request('get', url, params=params, **kwargs)


def options(url, **kwargs):
    r"""Sends an OPTIONS request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """

    kwargs.setdefault('allow_redirects', True)
    return request('options', url, **kwargs)


def head(url, **kwargs):
    r"""Sends a HEAD request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """

    kwargs.setdefault('allow_redirects', False)
    return request('head', url, **kwargs)


def post(url, data=None, json=None, **kwargs):
    r"""Sends a POST request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """

    return request('post', url, data=data, json=json, **kwargs)


def put(url, data=None, **kwargs):
    r"""Sends a PUT request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """

    return request('put', url, data=data, **kwargs)


def patch(url, data=None, **kwargs):
    r"""Sends a PATCH request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """

    return request('patch', url, data=data, **kwargs)


def delete(url, **kwargs):
    r"""Sends a DELETE request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """

    return request('delete', url, **kwargs)
from . import sessions


def request(method, url, **kwargs):
    """Constructs and sends a :class:`Request <Request>`.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
    :param data: (optional) Dictionary or list of tuples ``[(key, value)]`` (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.
        ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
        or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
        defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers
        to add for the file.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How many seconds to wait for the server to send data
        before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param verify: (optional) Either a boolean, in which case it controls whether we verify
        the server's TLS certificate, or a string, in which case it must be a path
        to a CA bundle to use. Defaults to ``True``.
    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

      >>> import requests
      >>> req = requests.request('GET', 'http://httpbin.org/get')
      <Response [200]>
    """

    # By using the 'with' statement we are sure the session is closed, thus we
    # avoid leaving sockets open which can trigger a ResourceWarning in some
    # cases, and look like a memory leak in others.
    with sessions.Session() as session:
        return session.request(method=method, url=url, **kwargs)
:param method: the HTTP method: get, post, put, patch, delete, options, head
:param url: the URL to request
:param params: parameters passed in the URL query string (GET)
       requests.request(method='GET', url='http://xxxx.com', params={'k1': 'v1', 'k2': 'v2'})
       is automatically encoded into http://xxxx.com?k1=v1&k2=v2
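You can inspect this encoding without sending anything by preparing the request first; a small sketch (the host is the same placeholder as above):

import requests

req = requests.Request('GET', 'http://xxxx.com', params={'k1': 'v1', 'k2': 'v2'})
prepared = req.prepare()
print(prepared.url)  # http://xxxx.com/?k1=v1&k2=v2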
:param data: data passed in the request body: dict, bytes, or a file-like object (POST)
       requests.request(method='POST', url='http://xxxx.com', data={'user': 'aaaa', 'password': 'bbb'})
       although written as a dict, it is form-encoded on the wire as data = "user=aaaa&password=bbb"
:param json: lands in the raw request body (in Django: request.body)
       requests.request(method='POST', url='http://xxxx.com', json={'user': 'aaaa', 'password': 'bbb'})
       the dict is serialized to a JSON string, json = '{"user": "aaaa", "password": "bbb"}', and placed in the request body
       Compared with data: form-encoded data can only carry flat basic values, not nested dicts or lists, whereas json simply serializes the whole structure to a string, so those types are fine.
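A quick way to see the data/json difference is to inspect the prepared body; a sketch against httpbin (the echo service is only for illustration):

import requests

r1 = requests.post('http://httpbin.org/post', data={'user': 'aaaa', 'password': 'bbb'})
print(r1.request.body)  # user=aaaa&password=bbb  (form-encoded)

r2 = requests.post('http://httpbin.org/post', json={'user': 'aaaa', 'password': 'bbb'})
print(r2.request.body)                     # the JSON string {"user": "aaaa", "password": "bbb"}
print(r2.request.headers['Content-Type'])  # application/json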
:param headers: the request headers
       Sites use these to keep scripts from logging in; the chouti auto-login above, for example, filters clients by the User-Agent header. The Referer header, which records the previous page, can likewise be checked to block hotlinking and the like.
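A short sketch that sends a browser-like User-Agent plus a Referer and lets httpbin echo back what the server saw (the Referer value is a placeholder):

import requests

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 '
                  '(KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
    'Referer': 'https://www.autohome.com.cn/news/',
}
r = requests.get('http://httpbin.org/headers', headers=headers)
print(r.json()['headers'])  # the headers exactly as the server received them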
:param cookies: cookies; they live in the request headers and are transmitted as part of headers
:param files: used to upload files with a POST, as key/value pairs
       requests.post(url='xxx', files={
           'f1': open('s1.py', 'rb'),                 # name: file object, or raw content, e.g. 'f1': 'dawfwafawfawf'
           'f2': ('newf1name', open('s1.py', 'rb')),  # the first tuple element is the file name stored on the server; it can be chosen freely
       })
:param auth: authentication, typically HTTP Basic auth, where the credentials are base64-encoded on the client. Some sites take the username and password entered in the login form, encode them in the browser, and place the result in the request headers.
       from requests.auth import HTTPBasicAuth
       ret = requests.get('https://api.github.com/user',
                          auth=HTTPBasicAuth('username', 'password'))
:param timeout: a float or a tuple. A single float is how long to wait for the server to send the response body; a 2-tuple is (connect timeout, read timeout), i.e. how long to wait for the connection and then for the server's response.
       ret = requests.get('http://google.com/', timeout=1)
       ret = requests.get('http://google.com/', timeout=(5, 1))
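When the limit is exceeded, requests raises an exception rather than returning; a small handling sketch:

import requests

try:
    ret = requests.get('http://google.com/', timeout=(5, 1))
except requests.exceptions.Timeout:
    print('connect or read timed out')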
:param allow_redirects: bool, default True. When allowed, requests follows the redirect and returns the data of the final page.
       requests.get('http://127.0.0.1:8000/test/', allow_redirects=False)
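The difference shows up in status_code and history; a sketch using httpbin's redirect endpoint:

import requests

r = requests.get('http://httpbin.org/redirect/1')  # followed by default
print(r.status_code, r.history)  # 200 [<Response [302]>]

r = requests.get('http://httpbin.org/redirect/1', allow_redirects=False)
print(r.status_code, r.headers['Location'])  # 302 /get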
:param proxies: proxies. A site may, for example, rate-limit by public (egress) IP to stop repeated actions from one address. Having different people act from different public IPs lifts the limit; those stand-ins are the "proxies".
       In practice this means a proxy server: we send our request to it, and it forwards the request to the target site from a proxy IP on our behalf.
       requests.post(
           url="http://dig.chouti.com/log",
           data=form_data,
           proxies={
               'http': 'http://proxy-server-address:port',
               'https': 'http://proxy-server-address:port',
           }
       )
:param stream: fetch the body as a stream: pull a little into memory, write it to disk, and repeat, saving each piece as it is downloaded. This prevents downloads from failing when the file is too large to fit in memory.
       from contextlib import closing
       with closing(requests.get('http://httpbin.org/get', stream=True)) as r:
           # process the response here
           for i in r.iter_content():
               print(i)
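The example above only prints the chunks; a sketch that actually saves them to disk as they arrive, which is the point of streaming (URL, file name, and chunk size are my own choices):

import requests

with requests.get('http://httpbin.org/image/png', stream=True) as r:
    with open('out.png', 'wb') as fp:
        for chunk in r.iter_content(chunk_size=8192):
            if chunk:  # skip keep-alive chunks
                fp.write(chunk)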
:param cert: with HTTPS the channel is SSL-encrypted; plain HTTP is a raw socket with unencrypted data and is therefore insecure. HTTPS adds the encrypt/decrypt step, which requires a certificate.
       One option is a self-issued certificate, which the client has to install themselves:
       requests.get(
           url="https:...",
           cert="xxx.pem",  # carried on every request; either a single .pem file, or a ('.crt', '.key') pair where both files are carried and combined for the handshake - two ways to verify
       )
       The other option is a certificate already trusted by the operating system: you purchase it, the vendor and the OS cooperate, it ships built in, and the site is validated directly.
:param verify: bool. When False, the cert checks above are skipped and the request still returns a result; many sites allow this for the user's convenience.
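A sketch of skipping verification; urllib3 then emits an InsecureRequestWarning, which can be silenced (the host here is hypothetical):

import requests
import urllib3

urllib3.disable_warnings()  # hide the InsecureRequestWarning that verify=False triggers
r = requests.get('https://self-signed.example/', verify=False)  # hypothetical self-signed host
print(r.status_code)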
In the auto-login above, we had to manage the cookies, session state, and other data produced during the conversation by hand. A Session object instead stores the response cookies, body, and so on globally, and automatically sends them along on later requests to the same site.
Note: we still have to set the request headers ourselves.
import requests

headers = {}
headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36'
headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'

session = requests.session()
i1 = session.get("https://dig.chouti.com/", headers=headers)
i1.close()

form_data = {
    'phone': "xxxx",
    'password': "xxxx",
    'oneMonth': ''
}
i2 = session.post(url="https://dig.chouti.com/login", data=form_data, headers=headers)
i3 = session.post("https://dig.chouti.com/link/vote?linksId=20324146", headers=headers)
print(i3.text)

Output:
{"result":{"code":"9999", "message":"推荐成功", "data":{"jid":"cdu_52941024478","likedTime":"1529507291930000","lvCount":"7","nick":"山上有风景","uvCount":"3","voteTime":"小于1分钟前"}}}
Related reading: the Django request object
Whatever format we send, the payload always lands in request.body, while request.POST may or may not be populated.
Django decides how to parse it based on the Content-Type request header.
For example: Content-Type: text/html; charset=utf-8. The common media types are:

text/html : HTML
text/plain : plain text
text/xml : XML
image/gif : GIF image
image/jpeg : JPG image
image/png : PNG image

Media types starting with application:

application/xhtml+xml : XHTML
application/xml : XML data
application/atom+xml : Atom XML feed
application/json : JSON data
application/pdf : PDF
application/msword : Word document
application/octet-stream : binary stream (typical for file downloads)
application/x-www-form-urlencoded : the default encType of <form>; form data is encoded as key/value pairs (the default way forms submit data)

And one more common media type, used for file uploads:

multipart/form-data : required when a form uploads files

These are the content-type values that come up most often in day-to-day development.
For example, when I POST data, the server receives the request body into request.body,
then looks up Content-Type: application/x-www-form-urlencoded in the request headers,
and only then copies the parsed body into request.POST.
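A sketch of a Django view illustrating the distinction (view and field names are placeholders):

import json
from django.http import JsonResponse

def echo(request):
    raw = request.body  # always the raw bytes that were sent
    if request.content_type == 'application/json':
        data = json.loads(raw)      # JSON bodies are never copied into request.POST
    else:
        data = request.POST.dict()  # form-encoded bodies are parsed into request.POST
    return JsonResponse({'received': data})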
from bs4 import BeautifulSoup

html = '''
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Title</title>
</head>
<body>
    <a href="/wwewe/fafwaw" class="btn btn2">666daw6fw</a>
    <div id="content" for='1'>
        <p>div>p
            <label>title</label>
        </p>
    </div>
    <hr/>
    <p id="bott">div,p</p>
</body>
</html>
'''
soup = BeautifulSoup(html, features="lxml")
tag = soup.find("a") print(tag.name) #a tag = soup.find(id="content") print(tag.name) #div
tag = soup.find('a')
print(tag.attrs)          # {'href': '/wwewe/fafwaw', 'class': ['btn', 'btn2']}
print(tag.attrs['href'])  # /wwewe/fafwaw
tag.attrs['id'] = "btn-primary"  # add
del tag.attrs['class']           # delete
tag.attrs['href'] = "/change"    # modify
print(tag.attrs)          # {'id': 'btn-primary', 'href': '/change'}
body = soup.find("body") print(body.children) #list_iterator object,只会获取子标签,对于内部子孙标签是做为字符串形式存在 from bs4.element import Tag for child in body.children: # print(type(child)) # <class 'bs4.element.NavigableString'>字符串类型,通常是换行符,空格等 # <class 'bs4.element.Tag'>子节点类型 if type(child) == Tag: print(child)
body = soup.find("body") for child in body.descendants: #会将内部子孙标签提出来,再次进行一次操做 # print(type(child)) # <class 'bs4.element.NavigableString'>字符串类型,通常是换行符,空格等 # <class 'bs4.element.Tag'>子节点类型 if type(child) == Tag: print(child)
body = soup.find("body") body.clear() #清空子标签,保留本身 print(soup) #body标签存在,内部为空
body = soup.find('body')
body.decompose()  # recursive delete, including the tag itself
print(soup)       # no body tag left
body = soup.find('body')
deltag = body.extract()  # recursive removal, including the tag itself, but the removed subtree is returned
print(soup)    # no body tag left
print(deltag)  # everything we removed
# serialize to a string; printing the tag directly also works, via the built-in __str__
body = soup.find('body')
v = body.decode()           # includes the current tag
print(v)
v = body.decode_contents()  # excludes the current tag
print(v)
# serialize to bytes
body = soup.find('body')
v = body.encode()           # includes body
print(v)
v = body.encode_contents()  # excludes body
print(v)
tag = soup.find(name="p") #默认是寻找全部子孙节点的数据,递归查找 print(tag) #找到子孙下的第一个 tag = soup.find(name='p',recursive=False) print(tag) #None 是由于,当前标签是html标签,而不是body tag = soup.find('body').find('p') print(tag) ##找到子孙下的第一个 tag = soup.find('body').find('p',recursive=False) print(tag) #<p>div,p</p> tag = soup.find('body').find('div',attrs={"id":"content","for":"1"},recursive=False) print(tag) #找到该标签
tags = soup.find_all('p')
print(tags)
tags = soup.find_all('p', limit=1)  # fetch only one, but still returned as a list
print(tags)
tags = soup.find_all('p', attrs={'id': "bott"})  # search by attribute
print(tags)
tags = soup.find_all(name=['p', 'a'])  # all p and a tags
print(tags)
tags = soup.find("body").find_all(name=['p', 'a'], recursive=False)  # all p and a tags, direct children only
print(tags)
tags = soup.find("body").find_all(name=['p', 'a'], text="div,p")  # all tags whose text is "div,p"
print(tags)

# Regex matching:
import re
pat = re.compile("p")
tags = soup.find_all(name=pat)
print(tags)
pat = re.compile("^lab")  # all tags whose name starts with "lab"
tags = soup.find_all(name=pat)
print(tags)
pat = re.compile(".*faf.*")
tags = soup.find_all(attrs={"href": pat})  # or simply href=pat
print(tags)
pat = re.compile("cont.*")
tags = soup.find_all(id=pat)
print(tags)

# Function matching:
def func(tag):
    return tag.has_attr("class") and tag.has_attr("href")

tags = soup.find_all(name=func)
print(tags)
tag = soup.find('a') print(tag.get("href")) #获取标签属性 print(tag.attrs.get("href")) #获取标签属性 print(tag.has_attr("href"))
tag = soup.find(id='content')
print(tag.get_text())  # the tag's text, including all descendant text
tag = soup.find("label")
print(tag.get_text())  # title
print(tag.string)      # title
tag.string = "test"
print(tag.get_text())  # test
body = soup.find("body") child_tag = body.find("div",recursive=False) if child_tag: print(body.index(child_tag)) #必须是其子标签,不是子孙标签
tag = soup.find('hr')
print(tag.is_empty_element)  # whether it is an empty (self-closing) element
tag.next
tag.next_element
tag.next_elements      # includes string/text nodes
tag.next_sibling       # the next sibling node
tag.next_siblings
tag.previous
tag.previous_element
tag.previous_elements
tag.previous_sibling
tag.previous_siblings
tag.parent
tag.parents
tag = soup.find(id="content") print(tag) print(tag.next) #下一个元素,这里是换行符 print(tag.next_element) #下一个元素,这里是换行符 print(tag.next_elements) #下面的全部子孙标签,都会提出来进行一次迭代 for ele in tag.next_elements: print(ele) print(tag.next_sibling) #只去获取子标签 print(tag.next_siblings) #只含有子标签 for ele in tag.next_siblings: print(ele)
tag.find_next(...)
tag.find_all_next(...)
tag.find_next_sibling(...)
tag.find_next_siblings(...)
tag.find_previous(...)
tag.find_all_previous(...)
tag.find_previous_sibling(...)
tag.find_previous_siblings(...)
tag.find_parent(...)
tag.find_parents(...)
tag = soup.find("label") # print(tag.parent) # for par in tag.parents: # print(par) print(tag.find_parent(id='content')) #根据条件去上面查找符合条件的一个标签 print(tag.find_parents(id='content')) #根据条件去向上面查找全部符合条件的标签,列表形式
soup.select("title") soup.select("p nth-of-type(3)") soup.select("body a") soup.select("html head title") tag = soup.select("span,a") soup.select("head > title") soup.select("p > a") soup.select("p > a:nth-of-type(2)") soup.select("p > #link1") soup.select("body > a") soup.select("#link1 ~ .sister") soup.select("#link1 + .sister") soup.select(".sister") soup.select("[class~=sister]") soup.select("#link1") soup.select("a#link2") soup.select('a[href]') soup.select('a[href="http://example.com/elsie"]') soup.select('a[href^="http://example.com/"]') soup.select('a[href$="tillie"]') soup.select('a[href*=".com/el"]') from bs4.element import Tag def default_candidate_generator(tag): for child in tag.descendants: if not isinstance(child, Tag): continue if not child.has_attr('href'): continue yield child tags = soup.find('body').select("a", _candidate_generator=default_candidate_generator) print(type(tags), tags) from bs4.element import Tag def default_candidate_generator(tag): for child in tag.descendants: if not isinstance(child, Tag): continue if not child.has_attr('href'): continue yield child tags = soup.find('body').select("a", _candidate_generator=default_candidate_generator, limit=1) print(type(tags), tags)
from bs4.element import Tag
tag_obj = Tag(name='pre', attrs={"col": 30})
tag_obj.string = "this is a newly created tag"
print(tag_obj)  # <pre col="30">this is a newly created tag</pre>
soup = BeautifulSoup(html, features="lxml")
from bs4.element import Tag
tag_obj = Tag(name='pre', attrs={"col": 30})
tag_obj.string = "this is a newly created tag"
soup.find(id="content").append(tag_obj)  # appended at the very end of the tag
print(soup)
soup = BeautifulSoup(html, features="lxml")
from bs4.element import Tag
tag_obj = Tag(name='pre', attrs={"col": 30})
tag_obj.string = "this is a newly created tag"
soup.find(id="content").insert(0, tag_obj)  # inserted at the very front
print(soup)
soup = BeautifulSoup(html, features="lxml")
from bs4.element import Tag
tag_obj = Tag(name='pre', attrs={"col": 30})
tag_obj.string = "this is a newly created tag"
soup.find(id="content").insert_before(tag_obj)  # placed before the current tag
soup.find(id="content").insert_after(tag_obj)   # placed after the current tag
print(soup)
soup = BeautifulSoup(html, features="lxml")
from bs4.element import Tag
tag_obj = Tag(name='pre', attrs={"col": 30})
tag_obj.string = "this is a newly created tag"
soup.find(id="content").replace_with(tag_obj)  # the original div is replaced
print(soup)
def setup(self, parent=None, previous_element=None, next_element=None,
previous_sibling=None, next_sibling=None):
soup = BeautifulSoup(html,features="lxml") div = soup.find('div') a = soup.find('a') div.setup(next_sibling=a) print(soup) #没有变化 print(div.next_sibling) #是咱们设置的那个标签对象
soup = BeautifulSoup(html, features="lxml")
from bs4.element import Tag
tag_obj = Tag(name='pre', attrs={"col": 30})
tag_obj.string = "this is a newly created tag"
a = soup.find("a")
a.wrap(tag_obj)  # wrap the current a tag inside the new tag
div = soup.find('div')
tag_obj.wrap(div)  # wrap tag_obj inside an existing tag; the wrapped content goes at the end
print(soup)
div = soup.find('div')
div.unwrap()  # remove the tag itself but keep its contents
print(soup)
import requests
from bs4 import BeautifulSoup

html1 = requests.get(url="https://github.com/login")  # hit the login page first for the token and cookies
html1.encoding = html1.apparent_encoding
soup = BeautifulSoup(html1.text, features="html.parser")
login_token_obj = soup.find(name='input', attrs={'name': 'authenticity_token'})
login_token = login_token_obj.get("value")  # the page's CSRF token
cookie_dict = html1.cookies.get_dict()
html1.close()
# the data the login form needs
login_data = {
    'login': "account",
    'password': "password",
    'authenticity_token': login_token,
    "utf8": "",
    "commit": "Sign in"
}
session_response = requests.post("https://github.com/session", data=login_data, cookies=cookie_dict)  # the cookies are mandatory
cookie_dict.update(session_response.cookies.get_dict())  # merge in the site's new cookies
index_response = requests.get("https://github.com/settings/repositories", cookies=cookie_dict)  # cookies are mandatory here too

soup2 = BeautifulSoup(index_response.text, features="html.parser")
# parse the repository list below: repo name and size
item_list = soup2.find_all("div", {'class': 'listgroup-item'})
for item in item_list:
    a_obj = item.find("a")
    s_obj = item.find('small')
    print(a_obj.text)
    print(s_obj.text)
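The same flow is shorter with requests.Session, which carries the cookies for us; a sketch under the same page-structure assumptions as above:

import requests
from bs4 import BeautifulSoup

session = requests.Session()
login_page = session.get("https://github.com/login")  # the session now holds the cookies
soup = BeautifulSoup(login_page.text, features="html.parser")
token = soup.find(name='input', attrs={'name': 'authenticity_token'}).get("value")

session.post("https://github.com/session", data={
    'login': "account", 'password': "password",
    'authenticity_token': token, "utf8": "", "commit": "Sign in",
})
repos = session.get("https://github.com/settings/repositories")  # cookies sent automatically
print(repos.status_code)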
Related reading: Why can't so many websites be scraped? Six common ways crawlers get past blocking
import requests
headers = {}  # set the request headers
headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36'
headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'

i1 = requests.get("https://dig.chouti.com/", headers=headers)
i1_cookie = i1.cookies.get_dict()
print(i1_cookie)
i1.close()

form_data = {
    'phone': "xxxx",
    'password': "xxxx",
    'oneMonth': ''
}
headers['Accept'] = '*/*'
i2 = requests.post(url="https://dig.chouti.com/login", headers=headers, data=form_data, cookies=i1_cookie)
i2_cookie = i2.cookies.get_dict()
i2_cookie.update(i1_cookie)

i3 = requests.post("https://dig.chouti.com/link/vote?linksId=20306326", headers=headers, cookies=i2_cookie)
print(i3.text)

The two print() calls produce:
{'JSESSIONID': 'aaaoJAuXMtUytb02Uw9pw', 'route': '0c5178ac241ad1c9437c2aafd89a0e50', 'gpsd': '91e20c26ddac51c60ce4ca8910fb5669'}
{"result":{"code":"9999", "message":"推荐成功", "data":{"jid":"cdu_52941024478","likedTime":"1529420936883000","lvCount":"23","nick":"山上有风景","uvCount":"2","voteTime":"小于1分钟前"}}}