NumPy: efficient operations on multi-dimensional arrays; efficient mathematical functions.
Matplotlib: visualization, 2D and (more recently) 3D plots.
SciPy: a large library implementing all kinds of numerical algorithms.
SymPy: symbolic (analytical) computation.
Pandas: statistics and data analysis (covered tomorrow).
NumPy provides a new data type: ndarray (the n-dimensional array).
np.array([2, 3, 6, 7])
np.array([2, 3, 6, 7+1j])
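A quick check (a minimal sketch): including a single complex element promotes the dtype of the whole array to complex.

import numpy as np

a = np.array([2, 3, 6, 7])
print(a.dtype)   # int64 (platform dependent)
b = np.array([2, 3, 6, 7+1j])
print(b.dtype)   # complex128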
import numpy as np

# arange accepts all three range(start, stop, step) parameters,
# i.e. start value, stop value and step size, plus a dtype parameter
a = np.arange(5)
b = np.arange(10, 100, 20, dtype=float)
# linspace(start, stop, num) returns num evenly spaced samples
# over the interval [start, stop]:
c = np.linspace(0., 2.5, 5)
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6]])
print(a)
print(a.shape)  # number of rows and columns
print(a.ndim)   # number of dimensions
print(a.size)   # number of elements
import numpy as np

a = np.arange(0, 20, 1)  # 1-dimensional
b = a.reshape((4, 5))    # 4 rows, 5 columns
c = a.reshape((20, 1))   # 2-dimensional
d = a.reshape((-1, 4))   # -1: inferred automatically
a.shape = (4, 5)         # reshape a in place
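reshape normally returns a view onto the same data rather than a copy; a quick way to confirm this (a small sketch) is np.shares_memory:

import numpy as np

a = np.arange(20)
b = a.reshape((4, 5))
print(np.shares_memory(a, b))  # True: b is a view, not a copy
b[0, 0] = 99
print(a[0])                    # 99: the change shows through a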
import numpy as np

a = np.array([1, 2, 3, 4, 5])
b = a.copy()
c1 = np.dot(np.transpose(a), b)  # dot: matrix (here inner) product
print(c1)
c = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(c)
tran = c.transpose()             # transpose: transposed matrix
print(tran)
ax = np.reshape(a, (5, 1))
bx = np.reshape(b, (1, 5))
c = np.dot(ax, bx)               # (5,1) x (1,5) gives the outer product
print(c)
import numpy as np

a = np.zeros(3)                # filled with zeros
b = np.zeros((2, 2), complex)  # complex: complex numbers
c = np.ones((2, 3))
d = np.random.rand(2, 4)       # rand: uniform random numbers between 0 and 1
e = np.random.randn(2, 4)      # randn: standard (Gaussian) normal distribution
                               # with mean 0 and standard deviation 1
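For reproducible results, the generator can be seeded first; a minimal sketch using the legacy np.random.seed API:

import numpy as np

np.random.seed(42)        # fix the seed
print(np.random.rand(3))  # the same numbers on every run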
import numpy as np

a = np.arange(5)
print(a)
b = a[2:]         # a slice is a view into a
b[0] = 100
print(b)
print(a)          # a is modified too
c = a[2:].copy()  # copy() makes an independent array
c[0] = 99
print(c)
print(a)          # a is unchanged this time
The * operator performs element-wise multiplication, not matrix multiplication:
Use the dot() function for matrix multiplication:
import numpy as np

A = np.array([[1, 2], [3, 4]])
print(A * A)         # element-wise product
print(np.dot(A, A))  # matrix product
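Since Python 3.5, the @ operator gives the same result as np.dot for 2-D arrays and often reads more clearly; a one-line sketch:

import numpy as np

A = np.array([[1, 2], [3, 4]])
print(A @ A)  # identical to np.dot(A, A)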
Other file formats are also available (see the API documentation).
save() writes an array to a binary file in NumPy's ".npy" format.
Example contents of the text file data.txt loaded below:
# Year Min temp. Max temp.
1990 -1.5 25.3
1991 -3.2 21.2
import numpy as np

a = np.linspace(0, 1, 12)
a.shape = (3, 4)
np.savetxt('myfile.txt', a)     # plain-text format
np.save('myfile', a)            # binary 'myfile.npy'
table = np.loadtxt("data.txt")  # read a table from a text file
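The binary ".npy" file written by save() is read back with np.load; a minimal sketch continuing the example above:

import numpy as np

a2 = np.load('myfile.npy')  # restores shape and dtype exactly
print(a2.shape)             # (3, 4)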
The above covers only numerical computation; next comes symbolic computation. Python does symbolic computation through the sympy module, for tasks such as explicitly (analytically) solving equations, evaluating integrals, and so on.
import sympy as sy

# Declare x and y as symbolic variables
x = sy.Symbol('x')
y = sy.Symbol('y')
a, b = sy.symbols('a b')
# Build a new symbolic expression (not a function)
f = x**2 + y**2 - 2*x*y + 5
print(f)
# Automatic simplification
g = x**2 + 2 - 2*x + x**2 - 1
print(g)
import sympy as sy

x = sy.Symbol('x')
y = sy.Symbol('y')
# gives [-1, 1]
print(sy.solve(x**2 - 1))
# no guarantee of a closed-form solution
print(sy.solve(x**3 + 0.5*x**2 - 1))
# express x in terms of y
print(sy.solve(x**3 + y**2))
# error: no algorithm can be found
print(sy.solve(x**x + 2*x - 1))
import sympy as sy

x = sy.Symbol('x')
y = sy.Symbol('y')
a, b = sy.symbols('a b')
# single variable
f = sy.sin(x) + sy.exp(x)
print(sy.integrate(f, (x, a, b)))
print(sy.integrate(f, (x, 1, 2)))
print(sy.integrate(f, (x, 1.0, 2.0)))
# multiple variables
g = sy.exp(x) + x * sy.sin(y)
print(sy.integrate(g, (y, a, b)))
import sympy as sy

x = sy.Symbol('x')
y = sy.Symbol('y')
# single variable
f = sy.cos(x) + x**x
print(sy.diff(f, x))
# multiple variables
g = sy.cos(y) * x + sy.log(y)
print(sy.diff(g, y))
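To turn a symbolic result into a number, SymPy provides subs and evalf; a brief sketch:

import sympy as sy

x = sy.Symbol('x')
df = sy.diff(sy.cos(x) + x**2, x)  # 2*x - sin(x)
print(df.subs(x, 1))               # exact: 2 - sin(1)
print(df.subs(x, 1).evalf())       # numeric: 1.15852901519210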
How a web crawler works:
Simulate a browser --> send requests to the target site --> receive the response data --> extract the useful data --> save it locally or to a database.
The full crawling workflow (a minimal end-to-end sketch follows the list):
1. Send the request (request libraries)
- requests module
- selenium module
2. Get the response data (returned by the server)
3. Parse and extract the data (parsing libraries)
- re regular expressions
- bs4 (BeautifulSoup4)
- XPath
4. Save the data (storage libraries)
- MongoDB
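A minimal sketch tying the four steps together with requests and re only (the URL and pattern are placeholders, not a real target site):

import requests
import re

# 1. Send the request
response = requests.get('https://example.com')  # placeholder URL
# 2. Get the response data
html = response.text
# 3. Parse and extract the data (here: every link target)
links = re.findall('href="(.*?)"', html, re.S)
# 4. Save the data locally
with open('links.txt', 'w', encoding='utf-8') as f:
    f.write('\n'.join(links))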
import requests
import re  # regular-expression module
# uuid.uuid4() generates a globally unique random string from the timestamp
import uuid

# The three steps of crawling
# 1. Send the request
def get_page(url):
    response = requests.get(url)
    return response

# 2. Parse the data
# Parse the home page to get the IDs of the video detail pages
def parse_index(text):
    # re.findall('regex pattern', 'text to parse', 'regex flags')
    res = re.findall('<a href="video_(.*?)"', text, re.S)
    detail_url_list = []
    for m_id in res:
        # Build the detail-page url
        detail_url = 'https://www.pearvideo.com/video_' + m_id
        detail_url_list.append(detail_url)
    return detail_url_list

# Parse a detail page to get the video url
def parse_detail(text):
    '''
    (.*?): capture the content inside the parentheses
    .*?:   match without capturing
    <video webkit-playsinline="" playsinline="" x-webkit-airplay="" autoplay="autoplay"
     src="https://video.pearvideo.com/mp4/adshort/20190613/cont-1566073-14015522_adpkg-ad_hd.mp4"
     style="width: 100%; height: 100%;"></video>
    Pattern: <video.*?src="(.*?)"
    (The above is just the analysis; it does not need to be written out.)
    Pattern actually used: srcUrl="(.*?)"
    '''
    movie_url = re.findall('srcUrl="(.*?)"', text, re.S)[0]
    return movie_url

# 3. Save the data
def save_movie(movie_url):
    response = requests.get(movie_url)
    # Write the video to a local file
    with open(f'{uuid.uuid4()}.mp4', 'wb') as f:
        f.write(response.content)
        f.flush()

if __name__ == '__main__':  # shortcut: type main + Enter
    # 1. Request the home page
    index_res = get_page(url='https://www.pearvideo.com/')
    # 2. Parse the home page and get the detail-page ids
    detail_url_list = parse_index(index_res.text)
    # 3. Request each detail-page url
    for detail_url in detail_url_list:
        detail_res = get_page(url=detail_url)
        # print(detail_res.text)
        # 4. Parse the detail page to get the video url
        movie_url = parse_detail(detail_res.text)
        print(movie_url)
        # 5. Save the video
        save_movie(movie_url)
A multi-threaded version using a thread pool:
import requests
import re  # regular-expression module
from concurrent.futures import ThreadPoolExecutor

# Limit the pool to 50 threads
pool = ThreadPoolExecutor(50)

def get_page(url):
    print(f"async task {url}")
    response = requests.get(url)
    return response

def parse_index(res):
    response = res.result()
    ids = re.findall('<a href="video_(.*?)"', response.text, re.S)
    for m_id in ids:
        detail_url = 'https://www.pearvideo.com/video_' + m_id
        pool.submit(get_page, detail_url).add_done_callback(parse_detail)

def parse_detail(res):
    response = res.result()
    movie_url = re.findall('srcUrl="(.*?)"', response.text, re.S)[0]
    movie_name = re.findall('<title>(.*?)<', response.text, re.S)[0]
    pool.submit(save_movie, movie_url, movie_name)

def save_movie(movie_url, movie_name):
    response = requests.get(movie_url)
    with open(f'{movie_name}.mp4', 'wb') as f:
        f.write(response.content)
        f.flush()

if __name__ == '__main__':
    url = 'https://www.pearvideo.com/'
    pool.submit(get_page, url).add_done_callback(parse_index)
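A flatter alternative (a sketch, not the code above): fetch the index synchronously, then fan the detail pages out over the pool with map, which also waits for every download when the with-block exits:

import requests
import re
from concurrent.futures import ThreadPoolExecutor

def fetch(url):
    return requests.get(url).text

with ThreadPoolExecutor(50) as pool:  # joins all threads on exit
    index_html = fetch('https://www.pearvideo.com/')
    ids = re.findall('<a href="video_(.*?)"', index_html, re.S)
    detail_urls = ['https://www.pearvideo.com/video_' + i for i in ids]
    for html in pool.map(fetch, detail_urls):  # downloads run in parallel
        print(re.findall('srcUrl="(.*?)"', html, re.S)[0])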
GET requests explained
User-Agent
# Visit Zhihu Explore
Request URL:
https://www.zhihu.com/explore
Request method:
GET
Request headers:
user-agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36
cookies
import requests

# Request-header dict
# headers = {
#     'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36'
# }
# Add the user-agent inside the GET request
# response = requests.get(url='https://www.zhihu.com/explore', headers=headers)
# print(response.status_code)  # 200
# print(response.text)
# with open('zhihu.html', 'w', encoding='utf-8') as f:
#     f.write(response.text)

'''
params: request parameters
Baidu search urls for 安徽工程大学:
https://www.baidu.com/s?wd=安徽工程大学&pn=10   page 2
https://www.baidu.com/s?wd=安徽工程大学&pn=20   page 3
'''
from urllib.parse import urlencode
# url = 'https://www.baidu.com/s?wd=%E8%94%A1%E5%BE%90%E5%9D%A4'
# url = 'https://www.baidu.com/s?' + urlencode({"wd": "蔡徐坤"})
url = 'https://www.baidu.com/s?'
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36'
}
# print(url)
# Pass the params argument to the get method
# response = requests.get(url, headers=headers, params={"wd": "安徽工程大学"})
response = requests.get(url, headers=headers, params={"wd": "安徽工程大学", "pn": "20"})
# print(response.text)
with open('gongcheng2.html', 'w', encoding='utf-8') as f:
    f.write(response.text)
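A quick way to verify that requests URL-encoded the params for you (a small sketch) is to inspect response.url:

import requests

response = requests.get('https://www.baidu.com/s',
                        params={"wd": "安徽工程大学", "pn": "20"})
print(response.url)  # the wd value is percent-encoded automatically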
Carrying login cookies to get past GitHub's login check
Request URL: https://github.com/settings/emails
Request method:
GET
Request headers:
User-Agent
Cookie:
import requests

# Request url
url = 'https://github.com/settings/emails'
# Request headers
headers = {
    'user-agent': '',
    # The cookies could also be spliced directly into the request headers:
    # 'Cookie': ''
}
# github_res = requests.get(url, headers=headers)

# Or pass them separately through the cookies argument
cookies = {
    'Cookie': ''
}
github_res = requests.get(url, headers=headers, cookies=cookies)
# If the phone number bound to the account appears in the page, we are logged in
print('15622792660' in github_res.text)
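An alternative to copying cookies by hand is requests.Session, which remembers cookies from earlier responses and sends them on later requests automatically (a hedged sketch; the actual login request is site-specific and omitted):

import requests

session = requests.Session()
# ... perform the site's login request here; the session records the cookies ...
res = session.get('https://github.com/settings/emails')  # cookies sent automatically
print(res.status_code)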
Scraping Douban movies
import requests
import re

url = 'https://movie.douban.com/top250'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36'
}
# 1. Send a request to the Douban TOP250 page and get the response
response = requests.get(url, headers=headers)
# print(response.text)

# 2. Extract the data with a regular expression:
# detail-page url, image link, title, rating, number of ratings
movie_content_list = re.findall(
    # regex pattern
    '<div class="item">.*?href="(.*?)">.*?src="(.*?)".*?<span class="title">(.*?)</span>.*?<span class="rating_num".*?>(.*?)</span>.*?<span>(.*?)人评价',
    # text to parse
    response.text,
    # matching flags
    re.S)

for movie_content in movie_content_list:
    # Unpack each movie's fields
    detail_url, movie_jpg, name, point, num = movie_content
    data = f'title: {name}, detail page: {detail_url}, image: {movie_jpg}, rating: {point}, ratings count: {num} \n'
    print(data)
    # 3. Save the data: append the movie info to a file
    with open('douban.txt', 'a', encoding='utf-8') as f:
        f.write(data)
Scraping the whole Douban TOP250
import requests
import re

url_1 = 'https://movie.douban.com/top250?start='
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36'
}
# The list shows 25 movies per page: start=0, 25, ..., 225
for i in range(0, 250, 25):
    url = f'{url_1}{i}&filter='
    response = requests.get(url, headers=headers)
    movie_content_list = re.findall(
        '<div class="item">.*?href="(.*?)">.*?src="(.*?)".*?<span class="title">(.*?)</span>.*?导演:(.*?)<br>.*?\n(.*?) / (.*?) / (.*?)\n.*?<span class="rating_num".*?>(.*?)</span>.*?<span>(.*?)人评价.*?class="inq">(.*?)</span>',
        response.text,
        re.S)
    for movie_content in movie_content_list:
        detail_url, movie_jpg, name, director, year, country, kind, point, num, profile = movie_content
        director = director.replace('\xa0', ' ')  # strip non-breaking spaces
        data = (f'title: {name}, director: {director}, {year.strip()}, {country}, {kind}, '
                f'rating: {point}, ratings count: {num}, {profile}, '
                f'detail page: {detail_url}, image: {movie_jpg}\n')
        with open('douban.txt', 'a', encoding='utf-8') as f:
            f.write(data)
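When looping over all ten pages it is polite (and lowers the risk of being blocked) to pause between requests; a minimal sketch with time.sleep:

import time
import requests

headers = {'User-Agent': 'Mozilla/5.0'}
for i in range(0, 250, 25):
    response = requests.get(f'https://movie.douban.com/top250?start={i}&filter=',
                            headers=headers)
    print(response.status_code)
    time.sleep(1)  # wait one second between page requests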
Result screenshot (image not preserved).