[Go] Adding a Message Queue and Worker Pool to a TCP Service

In the earlier design, every connection got its own main goroutine, and that main goroutine spawned a read goroutine and a write goroutine.

On top of that, each piece of business logic was handled in yet another goroutine. With 100,000 concurrent connections that adds up to roughly 300,000 goroutines. The 200,000 read/write goroutines spend most of their time blocked on I/O and cost little, but the 100,000 business goroutines are constantly being scheduled and switched, which burns CPU. The change here is to cap the business goroutines: create a worker pool, where each worker goroutine reads from its own channel, and that channel is buffered so it doubles as the worker's message queue.

package snet

import (
	"bufio"
	"fmt"
	"log"
	"math/rand"
	"net"
	"time"
)

type Conn struct {
	IP             string
	Port           uint32
	TCPConn        *net.TCPConn
	MsgChan        chan []byte // messages queued for the write goroutine to send back
	ExitChan       chan bool   // signals the write goroutine to exit
	Closed         bool
	WorkerPool     []chan []byte // one buffered channel (message queue) per worker
	WorkerPoolSize uint32        // number of worker goroutines
	PreWorkerQueue uint32        // capacity of each worker's message queue
}

func NewConn(IP string, Port uint32, WorkerPoolSize uint32) *Conn {
	s := &Conn{
		IP:             IP,
		Port:           Port,
		MsgChan:        make(chan []byte),
		ExitChan:       make(chan bool),
		WorkerPool:     make([]chan []byte, WorkerPoolSize),
		WorkerPoolSize: WorkerPoolSize,
		PreWorkerQueue: 10, // per-worker message queue length (example value; tune as needed)
	}
	return s
}

func (c *Conn) Start() {
	log.Printf("%s:%d start…\n", c.IP, c.Port)
	go func() {
		// bring up the worker pool before accepting connections
		c.StartWorkerPool()
		addr, err := net.ResolveTCPAddr("tcp4", fmt.Sprintf("%s:%d", c.IP, c.Port))
		if err != nil {
			log.Println("resolve tcp addr err ", err)
			return
		}
		listener, err := net.ListenTCP("tcp4", addr)
		if err != nil {
			log.Println("listen tcp err ", err)
			return
		}
		var connid uint32 = 0
		for {
			conn, err := listener.AcceptTCP()
			if err != nil {
				log.Println("accept tcp err ", err)
				continue
			}
			c.TCPConn = conn
			// one read goroutine and one write goroutine per accepted connection
			go c.StartRead()
			go c.StartWrite()
			connid++
		}
	}()
	select {}
}

func (c *Conn) StartRead() {
	log.Println("read goroutine is waiting")
	defer c.Stop()
	defer log.Println("read goroutine exit")
	reader := bufio.NewReader(c.TCPConn)
	for {
		lineBytes, err := reader.ReadBytes('\n')
		if err != nil {
			log.Println("startread read bytes error ", err)
			break
		}
		n := len(lineBytes)
		line := lineBytes[:n-1] // drop the trailing '\n'
		log.Println("start read from client ", string(line))
		if c.WorkerPoolSize > 0 {
			// hand the message to the worker pool
			c.SendMsgToWorker(line)
		} else {
			// no pool configured: fall back to one goroutine per message
			go c.HandleMsg(line)
		}
	}
}

func (c *Conn) StartWrite() {
	log.Println("write goroutine is waiting")
	defer log.Println("write goroutine exit")
	for {
		select {
		case data := <-c.MsgChan:
			if _, err := c.TCPConn.Write(data); err != nil {
				log.Println("startwrite conn write error ", err)
				return
			}
			log.Println("start write from server ", string(data))
		case <-c.ExitChan:
			return
		}
	}
}

func (c *Conn) HandleMsg(data []byte) {
	res := fmt.Sprintf("res:%s", string(data))
	c.MsgChan <- []byte(res)
}

func (c *Conn) SendMsgToWorker(data []byte) {
	// dispatch the message to a randomly chosen worker's queue
	rand.Seed(time.Now().UnixNano())
	workerId := rand.Intn(int(c.WorkerPoolSize))
	c.WorkerPool[workerId] <- data
}

func (c *Conn) StartWorkerPool() {
	for i := 0; i < int(c.WorkerPoolSize); i++ {
		// each worker owns a buffered channel that serves as its message queue
		c.WorkerPool[i] = make(chan []byte, c.PreWorkerQueue)
		go c.StartOneWorker(i, c.WorkerPool[i])
	}
}

func (c *Conn) StartOneWorker(workerId int, queue chan []byte) {
	log.Println("start one worker goroutine is waiting:", workerId)
	for {
		select {
		case data := <-queue:
			c.HandleMsg(data)
			log.Println("one worker goroutine is finished:", workerId)
		}
	}
}

func (c *Conn) Stop() {
	if c.Closed {
		return
	}
	c.Closed = true
	c.ExitChan <- true // tell the write goroutine to exit

	c.TCPConn.Close()
	close(c.ExitChan)
	close(c.MsgChan)
}
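
For reference, here is a minimal sketch of how this server might be started and tested. The import path snet, the address 127.0.0.1:8999, and the pool size 3 are assumptions made for the example, not values fixed by the code above.

package main

import (
	"snet" // assumed import path for the package above
)

func main() {
	// a pool of 3 workers caps business handling at 3 goroutines,
	// no matter how many connections arrive
	s := snet.NewConn("127.0.0.1", 8999, 3)
	s.Start() // blocks forever: Start ends in select {}
}

To try it by hand, connect with nc 127.0.0.1 8999 and type a line followed by Enter: the read goroutine picks it up, a worker builds the "res:" reply in HandleMsg, and the write goroutine sends it back.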
