(DotNetty的框架和实现是怎么回事,笔者不太清楚,但完全可参考Netty官方的文档来学习和使用DotNetty相关的API接口)
/*
 * Netty is a half-finished product: its job is to let you complete your own
 * communication layer on top of a custom protocol. It greatly simplifies the
 * development of network programs such as TCP and UDP socket services.
 * "Fast and simple" does not mean the resulting application is hard to
 * maintain or performs poorly: Netty is a carefully designed framework that
 * absorbed experience from implementing many protocols such as FTP, SMTP,
 * HTTP and various binary and text-based legacy protocols. It therefore
 * achieves ease of development, high performance and stability without
 * sacrificing flexibility.
 */

namespace Echo.Server
{
    using System;
    using System.Threading.Tasks;
    using DotNetty.Codecs;
    using DotNetty.Handlers.Logging;
    using DotNetty.Transport.Bootstrapping;
    using DotNetty.Transport.Channels;
    using DotNetty.Transport.Libuv;
    using Examples.Common;

    static class Program
    {
        /// <summary>
        /// Configures and starts the echo server, then blocks until a console
        /// line is entered, closes the listener and shuts the loops down.
        /// </summary>
        static async Task RunServerAsync()
        {
            ExampleHelper.SetConsoleLogger();

            // Declare the main (acceptor) event-loop dispatcher.
            var dispatcher = new DispatcherEventLoopGroup();

            /*
             * Netty provides several IEventLoopGroup implementations for different
             * transports. A server uses two groups: the first, usually called
             * "boss", accepts incoming connections; the second, usually called
             * "worker", handles the traffic of accepted connections once the boss
             * registers them onto it. How many threads are used and how they map
             * to the created Channels depends on the IEventLoopGroup
             * implementation and its constructor arguments.
             */

            // Boss group: the dispatcher itself.
            IEventLoopGroup bossGroup = dispatcher;                              // (1)
            // Worker group fed by the dispatcher.
            IEventLoopGroup workerGroup = new WorkerEventLoopGroup(dispatcher);

            try
            {
                // Every Netty server program is driven by a ServerBootstrap;
                // all options are assembled through its fluent API.
                var serverBootstrap = new ServerBootstrap();                     // (2)
                // Attach the boss and worker groups.
                serverBootstrap.Group(bossGroup, workerGroup);

                // Use the libuv TCP server channel as the transport.
                serverBootstrap.Channel<TcpServerChannel>();                     // (3)

                serverBootstrap
                    // Socket option: length of the pending-connection backlog queue.
                    .Option(ChannelOption.SoBacklog, 100)                        // (5)

                    // Logging handler installed on the boss (listener) pipeline.
                    .Handler(new LoggingHandler("SRV-LSTN"))

                    /*
                     * ChannelInitializer is a special handler whose purpose is to
                     * configure a freshly accepted Channel, typically by adding
                     * handlers to its ChannelPipeline. As the program grows, the
                     * anonymous initializer is usually promoted to a top-level class.
                     */
                    .ChildHandler(
                        new ActionChannelInitializer<IChannel>(                  // (4)
                            channel =>
                            {
                                // Every inbound message received by the server flows
                                // down this pipeline handler by handler, and every
                                // outbound message flows back through the same chain.
                                IChannelPipeline pipeline = channel.Pipeline;

                                // Per-connection logging.
                                pipeline.AddLast(new LoggingHandler("SRV-CONN"));

                                // Outbound: prepend each frame with its length.
                                // LengthFieldPrepender(2): the length is stored in 2 bytes.
                                pipeline.AddLast("framing-enc", new LengthFieldPrepender(2));

                                /*
                                 * Inbound: split the byte stream back into frames.
                                 * Arguments of LengthFieldBasedFrameDecoder, matching
                                 * the 2-byte prepender above (the original comment
                                 * described different values and was wrong):
                                 *   maxFrameLength      = ushort.MaxValue  (largest allowed frame)
                                 *   lengthFieldOffset   = 0  (length field starts at byte 0)
                                 *   lengthFieldLength   = 2  (length field is 2 bytes)
                                 *   lengthAdjustment    = 0  (length value covers the body only)
                                 *   initialBytesToStrip = 2  (drop the length field before passing the frame on)
                                 */
                                pipeline.AddLast("framing-dec", new LengthFieldBasedFrameDecoder(ushort.MaxValue, 0, 2, 0, 2));

                                // Business-logic handler.
                                pipeline.AddLast("echo", new EchoServerHandler());
                            }));

                // Binding starts the service; the same ServerBootstrap can bind
                // to several ports.
                IChannel boundChannel = await serverBootstrap.BindAsync(ServerSettings.Port); // (6)

                Console.WriteLine("wait the client input");
                Console.ReadLine();

                // Stop the service / close the listener channel.
                await boundChannel.CloseAsync();
            }
            finally
            {
                // Gracefully release both event-loop groups.
                await Task.WhenAll(                                              // (7)
                    bossGroup.ShutdownGracefullyAsync(TimeSpan.FromMilliseconds(100), TimeSpan.FromSeconds(1)),
                    workerGroup.ShutdownGracefullyAsync(TimeSpan.FromMilliseconds(100), TimeSpan.FromSeconds(1))
                );
            }
        }

        static void Main() => RunServerAsync().Wait();
    }
}
上一部分代码中加粗地方的实现
namespace Echo.Server
{
    using System;
    using System.Text;
    using DotNetty.Buffers;
    using DotNetty.Transport.Channels;

    /// <summary>
    /// Server-side channel event handler: echoes every received frame back to
    /// the sender.
    /// </summary>
    public class EchoServerHandler : ChannelHandlerAdapter // (1) adapter base class for business handlers
    {
        /// <summary>
        /// Invoked for each inbound message; prints UTF-8 text frames to the
        /// console and queues the same message to be echoed back.
        /// </summary>
        /// <param name="context">Pipeline context of the current channel.</param>
        /// <param name="message">The decoded inbound frame.</param>
        public override void ChannelRead(IChannelHandlerContext context, object message) // (2)
        {
            if (message is IByteBuffer received) // (3)
            {
                Console.WriteLine("Received from client: " + received.ToString(Encoding.UTF8));
            }

            // Queue the message for writing; the flush happens in
            // ChannelReadComplete once the read burst is over. (4)
            context.WriteAsync(message);
        }

        /// <summary>
        /// End of the current read burst: flush everything queued by ChannelRead.
        /// </summary>
        /// <param name="context">Pipeline context of the current channel.</param>
        public override void ChannelReadComplete(IChannelHandlerContext context) => context.Flush(); // (5)

        /// <summary>
        /// Pipeline error path: log the exception and close the connection.
        /// </summary>
        /// <param name="context">Pipeline context of the current channel.</param>
        /// <param name="exception">The exception raised in the pipeline.</param>
        public override void ExceptionCaught(IChannelHandlerContext context, Exception exception)
        {
            Console.WriteLine("Exception: " + exception);
            context.CloseAsync();
        }
    }
}
重点看注释的地方,其他地方跟Server端没有任何区别
namespace Echo.Client
{
    using System;
    using System.Net;
    using System.Text;
    using System.Threading.Tasks;
    using DotNetty.Buffers;
    using DotNetty.Codecs;
    using DotNetty.Handlers.Logging;
    using DotNetty.Transport.Bootstrapping;
    using DotNetty.Transport.Channels;
    using DotNetty.Transport.Channels.Sockets;
    using Examples.Common;

    static class Program
    {
        /// <summary>
        /// Connects to the echo server, forwards every console line to it,
        /// and exits when the user types a line containing "bye" (or closes
        /// the input stream).
        /// </summary>
        static async Task RunClientAsync()
        {
            ExampleHelper.SetConsoleLogger();

            var group = new MultithreadEventLoopGroup();

            try
            {
                var bootstrap = new Bootstrap();
                bootstrap
                    .Group(group)
                    .Channel<TcpSocketChannel>()
                    .Option(ChannelOption.TcpNodelay, true)
                    .Handler(
                        new ActionChannelInitializer<ISocketChannel>(
                            channel =>
                            {
                                // Mirror the server pipeline: logging, 2-byte
                                // length framing, then the business handler.
                                IChannelPipeline pipeline = channel.Pipeline;
                                pipeline.AddLast(new LoggingHandler());
                                pipeline.AddLast("framing-enc", new LengthFieldPrepender(2));
                                pipeline.AddLast("framing-dec", new LengthFieldBasedFrameDecoder(ushort.MaxValue, 0, 2, 0, 2));

                                pipeline.AddLast("echo", new EchoClientHandler());
                            }));

                IChannel clientChannel = await bootstrap.ConnectAsync(new IPEndPoint(ClientSettings.Host, ClientSettings.Port));

                // Endless console loop, equivalent to while(true). (4)
                for (;;)
                {
                    Console.WriteLine("input you data:");
                    string r = Console.ReadLine();

                    // FIX: the original threw InvalidOperationException when
                    // ReadLine returned null (input stream closed), which
                    // skipped clientChannel.CloseAsync() below. Treat
                    // end-of-input as a request to quit instead.
                    if (r == null)
                        break;

                    // Allocate a send buffer sized from the settings. (1)
                    IByteBuffer initialMessage = Unpooled.Buffer(ClientSettings.Size);
                    // Copy the UTF-8 payload into the buffer. (2)
                    initialMessage.WriteBytes(Encoding.UTF8.GetBytes(r));
                    // Write the buffer into the channel and flush it. (3)
                    await clientChannel.WriteAndFlushAsync(initialMessage);

                    if (r.Contains("bye"))
                        break;
                }

                Console.WriteLine("byebye");

                await clientChannel.CloseAsync();
            }
            finally
            {
                await group.ShutdownGracefullyAsync(TimeSpan.FromMilliseconds(100), TimeSpan.FromSeconds(1));
            }
        }

        static void Main() => RunClientAsync().Wait();
    }
}
namespace Echo.Client
{
    using System;
    using System.Text;
    using DotNetty.Buffers;
    using DotNetty.Transport.Channels;

    /// <summary>
    /// Client-side channel handler: prints frames echoed back by the server.
    /// </summary>
    public class EchoClientHandler : ChannelHandlerAdapter
    {
        // Optional greeting sent when the channel becomes active. NOTE: this
        // readonly field is never assigned anywhere in this class, so it is
        // always null unless initialization is added later.
        readonly IByteBuffer initialMessage;

        /// <summary>
        /// FIX: the original unconditionally wrote <c>initialMessage</c>, a
        /// never-assigned readonly field (always null), faulting the write as
        /// soon as the connection became active. Only send it when a message
        /// has actually been provided.
        /// </summary>
        /// <param name="context">Pipeline context of the current channel.</param>
        public override void ChannelActive(IChannelHandlerContext context)
        {
            if (this.initialMessage != null)
            {
                context.WriteAndFlushAsync(this.initialMessage);
            }
        }

        /// <summary>
        /// Invoked for each inbound frame; prints UTF-8 text to the console.
        /// </summary>
        /// <param name="context">Pipeline context of the current channel.</param>
        /// <param name="message">The decoded inbound frame.</param>
        public override void ChannelRead(IChannelHandlerContext context, object message)
        {
            if (message is IByteBuffer byteBuffer)
            {
                Console.WriteLine("Received from server: " + byteBuffer.ToString(Encoding.UTF8));
            }
        }

        /// <summary>
        /// End of the current read burst: flush any pending writes.
        /// </summary>
        /// <param name="context">Pipeline context of the current channel.</param>
        public override void ChannelReadComplete(IChannelHandlerContext context) => context.Flush();

        /// <summary>
        /// Pipeline error path: log the exception and close the connection.
        /// </summary>
        /// <param name="context">Pipeline context of the current channel.</param>
        /// <param name="exception">The exception raised in the pipeline.</param>
        public override void ExceptionCaught(IChannelHandlerContext context, Exception exception)
        {
            Console.WriteLine("Exception: " + exception);
            context.CloseAsync();
        }
    }
}
虽然DotNetty官方没有提供任何技术文档,但官方却提供了详细的调试记录。很多时候,我们学习者其实也可以通过调试记录来分析某一个功能的实现流程。我们可以将DotNetty的内部输入输出记录打印到控制台上。
InternalLoggerFactory.DefaultFactory.AddProvider(new ConsoleLoggerProvider((s, level) => true, false));
可以看到服务端的打印记录一下多出来了许多许多,其中大部分是属于DotNetty内部调试时的打印记录,我们只着重看以下的部分。
dbug: SRV-LSTN[0] [id: 0x3e8afca1] HANDLER_ADDED dbug: SRV-LSTN[0] [id: 0x3e8afca1] REGISTERED (1) dbug: SRV-LSTN[0] [id: 0x3e8afca1] BIND: 0.0.0.0:8007 (2) wait the client input dbug: SRV-LSTN[0] [id: 0x3e8afca1, 0.0.0.0:8007] ACTIVE (3) dbug: SRV-LSTN[0] [id: 0x3e8afca1, 0.0.0.0:8007] READ (4) dbug: SRV-LSTN[0] [id: 0x3e8afca1, 0.0.0.0:8007] RECEIVED: [id: 0x7bac2775, 127.0.0.1:64073 :> 127.0.0.1:8007] (5) dbug: SRV-LSTN[0] [id: 0x3e8afca1, 0.0.0.0:8007] RECEIVED_COMPLETE (6) dbug: SRV-LSTN[0] [id: 0x3e8afca1, 0.0.0.0:8007] READ (7) dbug: SRV-CONN[0] [id: 0x7bac2775, 127.0.0.1:64073 => 127.0.0.1:8007] HANDLER_ADDED (8) dbug: SRV-CONN[0] [id: 0x7bac2775, 127.0.0.1:64073 => 127.0.0.1:8007] REGISTERED (9) dbug: SRV-CONN[0] [id: 0x7bac2775, 127.0.0.1:64073 => 127.0.0.1:8007] ACTIVE (10) dbug: SRV-CONN[0] [id: 0x7bac2775, 127.0.0.1:64073 => 127.0.0.1:8007] READ (11) dbug: DotNetty.Buffers.AbstractByteBuffer[0] (12) -Dio.netty.buffer.bytebuf.checkAccessible: True dbug: SRV-CONN[0] [id: 0x7bac2775, 127.0.0.1:64073 => 127.0.0.1:8007] RECEIVED: 14B (13) +-------------------------------------------------+ | 0 1 2 3 4 5 6 7 8 9 a b c d e f | +--------+-------------------------------------------------+----------------+ |100000000| 00 0C 68 65 6C 6C 6F 20 77 6F 72 6C 64 21 |..hello world! | +--------+-------------------------------------------------+----------------+ Received from client: hello world! dbug: SRV-CONN[0] (14) [id: 0x7bac2775, 127.0.0.1:64073 => 127.0.0.1:8007] WRITE: 2B +-------------------------------------------------+ | 0 1 2 3 4 5 6 7 8 9 a b c d e f | +--------+-------------------------------------------------+----------------+ |100000000| 00 0C |.. 
| +--------+-------------------------------------------------+----------------+ dbug: SRV-CONN[0] (15) [id: 0x7bac2775, 127.0.0.1:64073 => 127.0.0.1:8007] WRITE: 12B +-------------------------------------------------+ | 0 1 2 3 4 5 6 7 8 9 a b c d e f | +--------+-------------------------------------------------+----------------+ |100000000| 68 65 6C 6C 6F 20 77 6F 72 6C 64 21 |hello world! | +--------+-------------------------------------------------+----------------+ dbug: SRV-CONN[0] (16) [id: 0x7bac2775, 127.0.0.1:64073 => 127.0.0.1:8007] RECEIVED_COMPLETE dbug: SRV-CONN[0] (17) [id: 0x7bac2775, 127.0.0.1:64073 => 127.0.0.1:8007] FLUSH dbug: SRV-CONN[0] (18) [id: 0x7bac2775, 127.0.0.1:64073 => 127.0.0.1:8007] READ
咋一看,有18个操做,好像有点太多了,其实否则,还有不少不少的内部调试细节并没打印到控制台上。编程