/*
NAME
  packet.go

DESCRIPTION
  RTMP packet functionality.

AUTHORS
  Saxon Nelson-Milton <saxon@ausocean.org>
  Dan Kortschak <dan@ausocean.org>
  Alan Noble <alan@ausocean.org>

LICENSE
  packet.go is Copyright (C) 2017-2019 the Australian Ocean Lab (AusOcean)

  It is free software: you can redistribute it and/or modify it
  under the terms of the GNU General Public License as published by the
  Free Software Foundation, either version 3 of the License, or (at your
  option) any later version.

  It is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
  for more details.

  You should have received a copy of the GNU General Public License
  along with revid in gpl.txt. If not, see http://www.gnu.org/licenses.

  Derived from librtmp under the GNU Lesser General Public License 2.1
  Copyright (C) 2005-2008 Team XBMC http://www.xbmc.org
  Copyright (C) 2008-2009 Andrej Stepanchuk
  Copyright (C) 2009-2010 Howard Chu
*/

package rtmp

import (
	"encoding/binary"
	"io"

	"bitbucket.org/ausocean/av/rtmp/amf"
)

// Packet types.
const (
	packetTypeChunkSize        = 0x01
	packetTypeBytesReadReport  = 0x03
	packetTypeControl          = 0x04
	packetTypeServerBW         = 0x05
	packetTypeClientBW         = 0x06
	packetTypeAudio            = 0x08
	packetTypeVideo            = 0x09
	packetTypeFlexStreamSend   = 0x0F // not implemented
	packetTypeFlexSharedObject = 0x10 // not implemented
	packetTypeFlexMessage      = 0x11 // not implemented
	packetTypeInfo             = 0x12
	packetTypeInvoke           = 0x14
	packetTypeFlashVideo       = 0x16 // not implemented
)
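
// For reference, these values correspond to the RTMP message type IDs used on
// the wire: 0x01 sets the chunk size, 0x03 acknowledges bytes read, 0x04
// carries user control messages, 0x05 and 0x06 negotiate bandwidth (window
// acknowledgement size and set peer bandwidth), 0x08 and 0x09 carry audio and
// video data, 0x12 carries AMF0 metadata and 0x14 carries AMF0 command
// (invoke) messages. The unimplemented values are the AMF3 counterparts and
// aggregate messages.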

// Header sizes.
const (
	headerSizeLarge   = 0
	headerSizeMedium  = 1
	headerSizeSmall   = 2
	headerSizeMinimum = 3
	headerSizeAuto    = 4
)
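
// Note that headerSizeLarge through headerSizeMinimum name chunk header
// formats (types 0-3) rather than byte counts; the corresponding byte counts
// are given by headerSizes below. headerSizeAuto asks resize to choose a
// format from the packet type and timestamp.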

// Special channels.
const (
	chanBytesRead = 0x02
	chanControl   = 0x03
	chanSource    = 0x04
)

// headerSizes defines header sizes for header types 0, 1, 2 and 3 respectively:
// 0: full header (12 bytes)
// 1: header without message ID (8 bytes)
// 2: basic header + timestamp (4 bytes)
// 3: basic header (chunk type and stream ID) (1 byte)
var headerSizes = [...]int{12, 8, 4, 1}
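
// For reference, a full 12 byte header as decoded by read below comprises a
// 1 byte basic header (header type and channel), a 3 byte timestamp, a 3 byte
// body size, a 1 byte packet type and a 4 byte little-endian stream ID
// (info). A timestamp field of 0xffffff signals a further 4 byte extended
// timestamp following the header.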

// packet defines an RTMP packet.
type packet struct {
	headerType      uint8
	packetType      uint8
	channel         int32
	hasAbsTimestamp bool
	timestamp       uint32
	info            int32
	bodySize        uint32
	bytesRead       uint32
	chunk           *chunk
	header          []byte
	body            []byte
}

// chunk defines an RTMP packet chunk.
type chunk struct {
	headerSize int32
	data       []byte
	header     [fullHeaderSize]byte
}

// read reads a packet from the session's connection.
func (pkt *packet) read(s *Session) error {
	var hbuf [fullHeaderSize]byte
	header := hbuf[:]

	_, err := s.read(header[:1])
	if err != nil {
		s.log(DebugLevel, pkg+"failed to read packet header 1st byte", "error", err.Error())
		if err == io.EOF {
			s.log(WarnLevel, pkg+"EOF error; connection likely terminated")
		}
		return err
	}
	pkt.headerType = (header[0] & 0xc0) >> 6
	pkt.channel = int32(header[0] & 0x3f)
	header = header[1:]

	switch {
	case pkt.channel == 0:
		// Two-byte form: the next byte holds the channel ID minus 64.
		_, err = s.read(header[:1])
		if err != nil {
			s.log(DebugLevel, pkg+"failed to read packet header 2nd byte", "error", err.Error())
			return err
		}
		pkt.channel = int32(header[0]) + 64
		header = header[1:]

	case pkt.channel == 1:
		// Three-byte form: the channel ID minus 64 is stored in the next two
		// bytes, low byte first, mirroring the encoding used by write below.
		_, err = s.read(header[:2])
		if err != nil {
			s.log(DebugLevel, pkg+"failed to read packet header 3rd byte", "error", err.Error())
			return err
		}
		pkt.channel = int32(binary.LittleEndian.Uint16(header[:2])) + 64
		header = header[2:]
	}

	if pkt.channel >= s.channelsAllocatedIn {
		n := pkt.channel + 10
		timestamp := append(s.channelTimestamp, make([]int32, 10)...)

		var pkts []*packet
		if s.channelsIn == nil {
			pkts = make([]*packet, n)
		} else {
			pkts = append(s.channelsIn[:pkt.channel:pkt.channel], make([]*packet, 10)...)
		}

		s.channelTimestamp = timestamp
		s.channelsIn = pkts

		for i := int(s.channelsAllocatedIn); i < len(s.channelTimestamp); i++ {
			s.channelTimestamp[i] = 0
		}
		for i := int(s.channelsAllocatedIn); i < int(n); i++ {
			s.channelsIn[i] = nil
		}
		s.channelsAllocatedIn = n
	}

	size := headerSizes[pkt.headerType]
	switch {
	case size == fullHeaderSize:
		pkt.hasAbsTimestamp = true
	case size < fullHeaderSize:
		if s.channelsIn[pkt.channel] != nil {
			*pkt = *(s.channelsIn[pkt.channel])
		}
	}
	size--

	if size > 0 {
		_, err = s.read(header[:size])
		if err != nil {
			s.log(DebugLevel, pkg+"failed to read packet header", "error", err.Error())
			return err
		}
	}
	hSize := len(hbuf) - len(header) + size

	if size >= 3 {
		pkt.timestamp = amf.DecodeInt24(header[:3])
		if size >= 6 {
			pkt.bodySize = amf.DecodeInt24(header[3:6])
			pkt.bytesRead = 0

			if size > 6 {
				pkt.packetType = header[6]

				if size == 11 {
					pkt.info = decodeInt32LE(header[7:11])
				}
			}
		}
	}

	extendedTimestamp := pkt.timestamp == 0xffffff
	if extendedTimestamp {
		_, err = s.read(header[size : size+4])
		if err != nil {
			s.log(DebugLevel, pkg+"failed to read extended timestamp", "error", err.Error())
			return err
		}
		// TODO: port this
		pkt.timestamp = amf.DecodeInt32(header[size : size+4])
		hSize += 4
	}

	if pkt.bodySize > 0 && pkt.body == nil {
		pkt.resize(pkt.bodySize, (hbuf[0]&0xc0)>>6)
	}

	toRead := int32(pkt.bodySize - pkt.bytesRead)
	chunkSize := s.inChunkSize

	if toRead < chunkSize {
		chunkSize = toRead
	}

	if pkt.chunk != nil {
		// Note: the statements following this panic are currently unreachable.
		panic("non-nil chunk")
		pkt.chunk.headerSize = int32(hSize)
		copy(pkt.chunk.header[:], hbuf[:hSize])
		pkt.chunk.data = pkt.body[pkt.bytesRead : pkt.bytesRead+uint32(chunkSize)]
	}

	_, err = s.read(pkt.body[pkt.bytesRead:][:chunkSize])
	if err != nil {
		s.log(DebugLevel, pkg+"failed to read packet body", "error", err.Error())
		return err
	}

	pkt.bytesRead += uint32(chunkSize)

	// keep the packet as ref for other packets on this channel
	if s.channelsIn[pkt.channel] == nil {
		s.channelsIn[pkt.channel] = &packet{}
	}
	*(s.channelsIn[pkt.channel]) = *pkt

	if extendedTimestamp {
		s.channelsIn[pkt.channel].timestamp = 0xffffff
	}

	if pkt.bytesRead != pkt.bodySize {
		panic("readPacket: bytesRead != bodySize")
	}

	if !pkt.hasAbsTimestamp {
		// timestamps seem to always be relative
		pkt.timestamp += uint32(s.channelTimestamp[pkt.channel])
	}
	s.channelTimestamp[pkt.channel] = int32(pkt.timestamp)

	s.channelsIn[pkt.channel].body = nil
	s.channelsIn[pkt.channel].bytesRead = 0
	s.channelsIn[pkt.channel].hasAbsTimestamp = false
	return nil
}
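
// A minimal receive loop using read might look like the following sketch
// (dispatch of the received packet is left to the caller):
//
//	for {
//		var pkt packet
//		if err := pkt.read(s); err != nil {
//			return err
//		}
//		// Act on pkt.packetType, e.g. handle invoke replies or media data.
//	}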

// resize adjusts the packet's storage to accommodate a body of the given size
// and header type.
func (pkt *packet) resize(size uint32, ht uint8) {
	buf := make([]byte, fullHeaderSize+size)
	pkt.header = buf
	pkt.body = buf[fullHeaderSize:]
	if ht != headerSizeAuto {
		pkt.headerType = ht
		return
	}
	switch pkt.packetType {
	case packetTypeVideo, packetTypeAudio:
		if pkt.timestamp == 0 {
			pkt.headerType = headerSizeLarge
		} else {
			pkt.headerType = headerSizeMedium
		}
	case packetTypeInfo:
		pkt.headerType = headerSizeLarge
		pkt.bodySize += 16
	default:
		pkt.headerType = headerSizeMedium
	}
}
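
// The write method below emits the assembled header followed by the packet
// body. A body larger than the negotiated chunk size is sent as a series of
// chunks: the first carries the full message header, and each continuation is
// prefixed with a minimal type 3 basic header (0xc0 combined with the channel
// bits), repeating the extended timestamp when one is in use.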

// write sends a packet.
// When queue is true, we expect a response to this request and cache the
// method on s.methodCalls.
func (pkt *packet) write(s *Session, queue bool) error {
	if pkt.body == nil {
		return errInvalidBody
	}

	if pkt.channel >= s.channelsAllocatedOut {
		s.log(DebugLevel, pkg+"growing channelsOut", "channel", pkt.channel)
		n := int(pkt.channel + 10)

		var pkts []*packet
		if s.channelsOut == nil {
			pkts = make([]*packet, n)
		} else {
			pkts = append(s.channelsOut[:pkt.channel:pkt.channel], make([]*packet, 10)...)
		}
		s.channelsOut = pkts

		for i := int(s.channelsAllocatedOut); i < n; i++ {
			s.channelsOut[i] = nil
		}

		s.channelsAllocatedOut = int32(n)
	}

	prevPkt := s.channelsOut[pkt.channel]
	var last int
	if prevPkt != nil && pkt.headerType != headerSizeLarge {
		// Compress the header by reusing the previous packet's attributes where possible.
		if prevPkt.bodySize == pkt.bodySize && prevPkt.packetType == pkt.packetType && pkt.headerType == headerSizeMedium {
			pkt.headerType = headerSizeSmall
		}

		if prevPkt.timestamp == pkt.timestamp && pkt.headerType == headerSizeSmall {
			pkt.headerType = headerSizeMinimum
		}

		last = int(prevPkt.timestamp)
	}

	if pkt.headerType > 3 {
		s.log(WarnLevel, pkg+"unexpected header type", "type", pkt.headerType)
		return errInvalidHeader
	}

	// The complete packet starts from headerSize _before_ the start of the body.
	// origIdx is the original offset, which will be 0 for a full (12-byte) header or 11 for a minimum (1-byte) header.
	headBytes := pkt.header
	hSize := headerSizes[pkt.headerType]
	origIdx := fullHeaderSize - hSize

	// adjust 1 or 2 bytes for the channel
	cSize := 0
	switch {
	case pkt.channel > 319:
		cSize = 2
	case pkt.channel > 63:
		cSize = 1
	}

	if cSize != 0 {
		origIdx -= cSize
		hSize += cSize
	}

	// adjust 4 bytes for the timestamp
	var ts uint32
	if prevPkt != nil {
		ts = uint32(int(pkt.timestamp) - last)
	}
	if ts >= 0xffffff {
		origIdx -= 4
		hSize += 4
		s.log(DebugLevel, pkg+"larger timestamp than 24 bits", "timestamp", ts)
	}

	headerIdx := origIdx

	c := pkt.headerType << 6
	switch cSize {
	case 0:
		c |= byte(pkt.channel)
	case 1:
		// Do nothing.
	case 2:
		c |= 1
	}
	headBytes[headerIdx] = c
	headerIdx++

	if cSize != 0 {
		tmp := pkt.channel - 64
		headBytes[headerIdx] = byte(tmp & 0xff)
		headerIdx++

		if cSize == 2 {
			headBytes[headerIdx] = byte(tmp >> 8)
			headerIdx++
		}
	}

	if headerSizes[pkt.headerType] > 1 {
		res := ts
		if ts > 0xffffff {
			res = 0xffffff
		}
		amf.EncodeInt24(headBytes[headerIdx:], int32(res))
		headerIdx += 3 // 24bits
	}

	if headerSizes[pkt.headerType] > 4 {
		amf.EncodeInt24(headBytes[headerIdx:], int32(pkt.bodySize))
		headerIdx += 3 // 24bits
		headBytes[headerIdx] = pkt.packetType
		headerIdx++
	}

	if headerSizes[pkt.headerType] > 8 {
		binary.LittleEndian.PutUint32(headBytes[headerIdx:headerIdx+4], uint32(pkt.info))
		headerIdx += 4 // 32bits
	}

	if ts >= 0xffffff {
		amf.EncodeInt32(headBytes[headerIdx:], int32(ts))
		headerIdx += 4 // 32bits
	}

	size := int(pkt.bodySize)
	chunkSize := int(s.outChunkSize)

	if s.deferred == nil {
		// Defer sending small audio packets (at most once).
		if pkt.packetType == packetTypeAudio && size < chunkSize {
			s.deferred = headBytes[origIdx:][:size+hSize]
			s.log(DebugLevel, pkg+"deferred sending packet", "size", size, "la", s.link.conn.LocalAddr(), "ra", s.link.conn.RemoteAddr())
			return nil
		}
	} else {
		// Send the previously deferred packet if combining it with the next one would exceed the chunk size.
		if len(s.deferred)+size+hSize > chunkSize {
			s.log(DebugLevel, pkg+"sending deferred packet separately", "size", len(s.deferred))
			_, err := s.write(s.deferred)
			if err != nil {
				return err
			}
			s.deferred = nil
		}
	}

	// TODO(kortschak): Rewrite this horrific piece of premature optimisation.
	// NB: RTMP wants packets in chunks which are 128 bytes by default, but the server may request a different size.
	s.log(DebugLevel, pkg+"sending packet", "la", s.link.conn.LocalAddr(), "ra", s.link.conn.RemoteAddr(), "size", size)
	for size+hSize != 0 {
		if chunkSize > size {
			chunkSize = size
		}
		bytes := headBytes[origIdx:][:chunkSize+hSize]
		if s.deferred != nil {
			// Prepend the previously deferred packet and write it with the current one.
			s.log(DebugLevel, pkg+"combining deferred packet", "size", len(s.deferred))
			bytes = append(s.deferred, bytes...)
		}
		_, err := s.write(bytes)
		if err != nil {
			return err
		}
		s.deferred = nil

		size -= chunkSize
		origIdx += chunkSize + hSize
		hSize = 0

		if size > 0 {
			origIdx -= 1 + cSize
			hSize = 1 + cSize

			if ts >= 0xffffff {
				origIdx -= 4
				hSize += 4
			}

			headBytes[origIdx] = 0xc0 | c

			if cSize != 0 {
				tmp := int(pkt.channel) - 64
				headBytes[origIdx+1] = byte(tmp)

				if cSize == 2 {
					headBytes[origIdx+2] = byte(tmp >> 8)
				}
			}
			if ts >= 0xffffff {
				extendedTimestamp := headBytes[origIdx+1+cSize:]
				amf.EncodeInt32(extendedTimestamp[:4], int32(ts))
			}
		}
	}

	// We invoked a remote method.
	if pkt.packetType == packetTypeInvoke {
		buf := pkt.body[1:]
		meth := amf.DecodeString(buf)
		s.log(DebugLevel, pkg+"invoking method "+meth)
		// Keep it in the call queue until the result arrives.
		if queue {
			buf = buf[3+len(meth):]
			txn := int32(amf.DecodeNumber(buf[:8]))
			s.methodCalls = append(s.methodCalls, method{name: meth, num: txn})
		}
	}

	if s.channelsOut[pkt.channel] == nil {
		s.channelsOut[pkt.channel] = &packet{}
	}
	*(s.channelsOut[pkt.channel]) = *pkt

	return nil
}

// decodeInt32LE decodes a 32-bit little-endian integer from data.
func decodeInt32LE(data []byte) int32 {
	return int32(data[3])<<24 | int32(data[2])<<16 | int32(data[1])<<8 | int32(data[0])
}

// encodeInt32LE encodes v into dst as a 32-bit little-endian integer and
// returns the number of bytes written.
func encodeInt32LE(dst []byte, v int32) int32 {
	binary.LittleEndian.PutUint32(dst, uint32(v))
	return 4
}