首先说一下我为何要写这个:我并没有系统地学过 Python,学校也只教了一点 C。很久以前自己就学过一点 Python,但也只学了一点点,和没学一样。
认真审视一下我的编程技术,真的很菜,必须要专一学习一门语言了。因此在这里把之前学过的杂乱知识汇总一下,以便查阅。
备注:以下代码均为 Python 3.7。
在 Python 中,我们常用 socket() 函数来创建套接字,语法格式如下:
socket.socket([family[, type[, proto]]])
参数:
family:套接字家族,可以是 AF_UNIX 或者 AF_INET;type:套接字类型,可以根据是面向连接还是非连接分为 SOCK_STREAM 或 SOCK_DGRAM;protocol:一般不填,默认为 0。
使用方法举例:固定 IP 端口探测
import socket

# Banner-grab a fixed IP:port — connect, send a probe message, print the reply.
s = socket.socket()  # defaults: AF_INET, SOCK_STREAM
try:
    s.connect(('43.225.100.88', 22))
    message = 'hello word!\n'
    s.send(message.encode())
    banner = s.recv(1024)
    print(banner)
except OSError as e:
    # The original swallowed connect() failures with a bare `except: pass`
    # and then crashed calling send() on the unconnected socket; report
    # the failure instead, and always close the socket.
    print('[-] Connection failed:', e)
finally:
    s.close()
import socket
import sys

# Scan ports 20-39 on the host given as argv[1], printing any banner received.
name = sys.argv[0]
ip = sys.argv[1]
message = 'hello word!\n'.encode()
for port in range(20, 40):
    # A TCP socket cannot be reused after a connect() attempt: the original
    # created one socket outside the loop, so at most the first connect
    # could ever succeed.  Create a fresh socket for every port.
    s = socket.socket()
    try:
        print("[+] Attempting to connect to:" + ip + ":" + str(port) + "...")
        s.connect((ip, port))
        s.send(message)
        banner = s.recv(1024)
        if banner:
            print("[-] Port " + str(port) + " is open:", end="")
            print(banner)
    except OSError:
        # Closed/filtered port — move on (narrowed from a bare except).
        pass
    finally:
        s.close()
import socket

# Scan a fixed list of hosts/ports and print banners from open ports.
hosts = ['127.0.0.1', '192.168.1.5', '10.0.0.1']
ports = [22, 445, 80, 443, 3389]
messages = 'hello word\n'.encode()
for host in hosts:
    for port in ports:
        # Fresh socket per attempt: a socket cannot reconnect after a
        # previous connect() attempt (the original reused a single one,
        # so only the first connection could succeed).
        s = socket.socket()
        try:
            print("[+] Connecting to " + host + ":" + str(port))
            s.connect((host, port))
            s.send(messages)
            banner = s.recv(1024)
            if banner:
                print("[-] Port " + str(port) + " is open:", end="")
                print(banner)
        except OSError:
            # Closed/filtered port — move on (narrowed from a bare except).
            pass
        finally:
            s.close()
import socket
import re

host = '192.168.83.130/24'
# Ports to scan.  NOTE(review): the FTP branch below tests for port 21,
# which is not in this list, so it can never fire unless '21' is added.
ports = ['22', '1433', '3306']
# 2-second timeout for every socket created afterwards.
socket.setdefaulttimeout(2)


def scan(host):
    """Probe each port in `ports` on *host*; on known services attempt a
    weak-password check via a project-local module."""
    for port in ports:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect((host, int(port)))
            print("[*]%s port %s is open" % (host, port))
            if port == '21':
                # FTP open: try weak FTP credentials (ftp_check module).
                print("[*] Try to crack FTP pass")
                __import__('ftp_check').check(host)
            elif port == '3306':
                # MySQL open: try weak MySQL credentials (mysql_check module).
                print("[*]Try to crack Mysql pass")
                __import__('mysql_check').check(host)
        except OSError:
            # Port closed or timed out — try the next one (was a bare except).
            continue
        finally:
            s.close()


# If the input contains /24, scan (part of) the C-class range; else one IP.
if '/24' in host:
    print(host)
    for x in range(130, 140, 1):  # only 10 addresses, to save time
        # Replace the final octet.  The original pattern '.\d+/24' left the
        # dot unescaped, so it matched ANY character before the digits;
        # escape it to match only the literal dot.
        ip = re.sub(r'\.\d+/24', '.' + str(x), host)
        print(ip)
        scan(ip)
else:
    ip = host
    scan(ip)
__import__
函数:
__import__(module)
相当于 import module
import urllib.request

# Fetch a page and walk through the attributes/methods of the
# http.client.HTTPResponse object that urlopen() returns.
resp = urllib.request.urlopen('https://www.cnblogs.com')
print(resp.read().decode('utf-8'))  # page body, decoded from UTF-8
print(resp.getheader('server'))     # a single response header value
print(resp.getheaders())            # all headers as (name, value) tuples
print(resp.fileno())                # underlying file descriptor
print(resp.version)                 # HTTP protocol version
print(resp.status)                  # status code: 200 OK, 404 not found, ...
print(resp.debuglevel)              # current debug level
print(resp.closed)                  # whether the response object is closed
print(resp.geturl())                # URL that was actually retrieved
print(resp.info())                  # header block as a message object
print(resp.getcode())               # status code again
print(resp.msg)                     # reason phrase, e.g. 'OK' on success
print(resp.reason)                  # reason phrase / status text
参数:
url:网站地址,str 类型,也可以是一个 Request 对象。data:可选参数,内容为字节流编码格式(即 bytes 类型);如果传递 data 参数,urlopen 将使用 POST 方式请求。timeout:用于设置超时时间,单位为秒;如果请求超出了设置时间还未得到响应则抛出异常,支持 HTTP、HTTPS、FTP 请求。context:它必须是 ssl.SSLContext 类型,用来指定 SSL 设置;此外,cafile 和 capath 这两个参数分别指定 CA 证书和它的路径,会在 HTTPS 链接时用到。
参数:
url:请求的 URL,必须传递的参数,其他都是可选参数。data:上传的数据,必须传 bytes 字节流类型的数据;如果它是字典,可以先用 urllib.parse 模块里的 urlencode() 编码。headers:它是一个字典,传递的是请求头数据,可以通过它构造请求头,也可以通过调用请求实例的方法 add_header() 来添加。origin_req_host:指请求方的 host 名称或者 IP 地址。unverifiable:表示这个请求是否是无法验证的,默认为 False;如我们请求一张图片,如果没有权限获取图片,那它的值就是 True。method:是一个字符串,用来指示请求使用的方法,如 GET、POST、PUT 等。
from urllib import request,parse

# POST form data to httpbin using a Request object with custom headers.
url = 'http://httpbin.org/post'
headers = {
    'User-Agent': 'Mozilla/5.0 (compatible; MSIE 5.5; Windows NT)',
    'Host': 'httpbin.org',
}
form = {'name': 'germey'}
# urlopen() needs bytes for data: urlencode the dict, then encode to bytes.
payload = bytes(parse.urlencode(form), encoding='utf-8')
req = request.Request(url=url, data=payload, headers=headers, method='POST')
# Headers can also be attached after construction:
# req.add_header('User-Agent','Mozilla/5.0 (compatible; MSIE 8.4; Windows NT')
resp = request.urlopen(req)
print(resp.read())
from urllib import request,parse

# GET request with query parameters and browser-like headers.
values = {"id": "2"}
# Build the query string with urlencode(): it escapes values and joins
# multiple keys with '&'.  The original hand-rolled loop produced "?id=2"
# but would emit "?id=2name=x" (no separator) with more than one key.
params = "?" + parse.urlencode(values)
url = "http://43.247.91.228:84/Less-1/index.php"
headers = {
    # Header values copied from Chrome's request headers.
    # 'Accept': 'application/json, text/plain, */*',
    # 'Accept-Encoding': 'gzip, deflate',
    # 'Accept-Language': 'zh-CN,zh;q=0.8',
    # 'Connection': 'keep-alive',
    # 'Content-Type': 'application/x-www-form-urlencoded',
    'Referer': url,
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'
}
data = bytes(parse.urlencode(values), encoding='utf-8')
#req = request.Request(url=url, headers=headers, data=data)  # POST variant
req = request.Request(url + params)  # GET: parameters go in the URL
response = request.urlopen(req)
print(response.read().decode('utf-8'))
from urllib import parse,request
import urllib.error

# GET with query parameters plus explicit HTTPError handling.
url = 'http://43.247.91.228:80/Less-1/index.php'
headers = {  # NOTE(review): defined but never attached to the request below
    'User-Agent': 'Mozilla/5.0 (compatible; MSIE 5.5; Windows NT)',
    'Host': '43.247.91.228:80',
}
data = {'id': '1'}
# urlencode() escapes values and joins multiple keys with '&'; the original
# hand-rolled loop would concatenate a second key without any separator.
params = '?' + parse.urlencode(data)
req = urllib.request.Request(url + params)
try:
    response = urllib.request.urlopen(req)
    print(response.read().decode('utf-8'))
except urllib.error.HTTPError as e:
    # The server answered with an error status: show code and body.
    print(e.code)
    print(e.read().decode("utf-8"))
import urllib.request

# Route requests through an HTTP proxy by installing a global opener.
proxy = urllib.request.ProxyHandler({"http" : "39.80.118.178:8060"})
opener = urllib.request.build_opener(proxy)
urllib.request.install_opener(opener)  # affects every subsequent urlopen()
page = urllib.request.urlopen("http://43.247.91.228:84/Less-1/?id=1").read().decode("utf-8")
print(page)
import requests

# Basic GET with requests, showing the main Response attributes.
# The original used Python 2 `print x` statements, which are a SyntaxError
# under the Python 3.7 this article targets; converted to print() calls.
r = requests.get('http://43.247.91.228:84/Less-1/?id=1')
print(r.headers)      # response headers
print(r.status_code)  # HTTP status code
print(r.url)          # final URL after redirects
print(r.text)         # body decoded to str
print(r.content)      # raw body bytes
import requests

# GET with query parameters passed via params= (requests builds the URL).
query = {'id': 1}
resp = requests.get('http://43.247.91.228:84/Less-1/', params=query)
print(resp.url)
print(resp.content.decode("utf-8"))
import requests

# POST form data passed via data= (sent in the request body).
form = {'id': 1}
resp = requests.post('http://43.247.91.228:84/Less-1/', data=form)
print(resp.url)
print(resp.content.decode("utf-8"))
import requests

# GET with a custom User-Agent header.
target = 'http://43.247.91.228:84/Less-1/?id=1'
ua = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0'}
resp = requests.get(target, headers=ua)
print(resp.text)
import requests

# Convert a raw Cookie header string into a dict and send it with a GET.
raw_cookies="PHPSESSID=d7kkojg82otnh9c53ao1m87pq3; security=low"
cookies = {}
for pair in raw_cookies.split(';'):
    key, value = pair.split('=', 1)
    # strip() the key: the "; "-separated pairs leave a leading space that
    # would otherwise produce the bogus cookie name " security".
    cookies[key.strip()] = value
testurl='http://43.247.91.228:81/'
s = requests.get(testurl, cookies=cookies)
print(s.text)
import requests

# Log in by POSTing the login form fields.
# The original line ended with a stray "app" (a category tag fused into the
# code during extraction), which made it a SyntaxError; removed, along with
# the redundant trailing semicolon.
data = {'username':'admin','password':'password','Login':'Login'}
r = requests.post('http://43.247.91.228:81/login.php', data=data)
print(r.url)
print(r.content.decode("utf-8"))
# Skip TLS certificate verification (verify=False); without it requests
# raises an SSLError for sites whose certificate cannot be validated.
result=requests.get('https://www.v2ex.com', verify=False)
忽略验证SSL证书,否则会报错