mirror of https://bitbucket.org/ausocean/av.git
/*
NAME
  FLVGenerator.go

DESCRIPTION
  See Readme.md

AUTHOR
  Saxon Nelson-Milton <saxon@ausocean.org>

LICENSE
  FLVGenerator.go is Copyright (C) 2017 the Australian Ocean Lab (AusOcean)

  It is free software: you can redistribute it and/or modify it under the terms
  of the GNU General Public License as published by the Free Software
  Foundation, either version 3 of the License, or (at your option) any later
  version.

  It is distributed in the hope that it will be useful, but WITHOUT ANY
  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
  A PARTICULAR PURPOSE. See the GNU General Public License for more details.

  You should have received a copy of the GNU General Public License along with
  revid in gpl.txt. If not, see [GNU licenses](http://www.gnu.org/licenses).
*/

package generator

import (
	"time"

	"bitbucket.org/ausocean/av/flv"
)

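// The tag size constants below appear to follow the FLV tag layout: an FLV tag
// header is 11 bytes and the AVC video data header a further 5 bytes, giving
// videoHeaderSize = 16, while audioSize = 18 is the total size of the first
// dummy audio tag (11-byte header plus 7 bytes of audio data). interFrameCode,
// keyFrameCode and sequenceCode are H.264 NAL unit type values (1 = non-IDR
// slice, 5 = IDR slice, 6 = SEI).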
const (
	inputChanLength  = 1000
	outputChanLength = 1000
	audioSize        = 18
	videoHeaderSize  = 16
	interFrameCode   = 1
	keyFrameCode     = 5
	sequenceCode     = 6
)

// Data representing silent audio (required for YouTube).
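// dummyAudioTag1Data appears to be an AAC sequence header (AudioSpecificConfig)
// and dummyAudioTag2Data an encoded silent AAC frame (its bytes include the
// encoder string "Lavc58.6.102"); this is an observation, not documented
// behaviour.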
var dummyAudioTag1Data = []byte{0x00, 0x12, 0x08, 0x56, 0xe5, 0x00}
var dummyAudioTag2Data = []byte{0x01, 0xdc, 0x00, 0x4c, 0x61, 0x76, 0x63, 0x35,
	0x38, 0x2e, 0x36, 0x2e, 0x31, 0x30, 0x32, 0x00, 0x02, 0x30, 0x40, 0x0e}

// flvGenerator provides the properties required for the generation of flv
// video from raw video data.
type flvGenerator struct {
	fps              uint
	inputChan        chan []byte
	outputChan       chan []byte
	audioFlag        bool
	videoFlag        bool
	lastTagSize      int
	currentTimestamp uint32
	header           flv.Header
	startTime        time.Time
	firstTag         bool
}

// GetInputChan returns the input channel to the generator. This is where the
// raw data frames are entered into the generator.
func (g *flvGenerator) GetInputChan() chan []byte {
	return g.inputChan
}

// GetOutputChan returns the output channel of the generator. This is where the
// flv packets (more specifically, tags) are output.
func (g *flvGenerator) GetOutputChan() chan []byte {
	return g.outputChan
}

// NewFlvGenerator returns an instance of the flvGenerator struct.
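//
// A minimal usage sketch (assuming a hypothetical frameSource channel of raw
// H.264 frames and a hypothetical dst io.Writer, neither of which is defined
// in this package):
//
//	g := NewFlvGenerator(true, true, 25)
//	g.Start()
//	go func() {
//		for frame := range frameSource {
//			g.GetInputChan() <- frame
//		}
//	}()
//	for tag := range g.GetOutputChan() {
//		dst.Write(tag)
//	}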
func NewFlvGenerator(audio bool, video bool, fps uint) (g *flvGenerator) {
	g = new(flvGenerator)
	g.fps = fps
	g.audioFlag = audio
	g.videoFlag = video
	g.currentTimestamp = 0
	g.lastTagSize = 0
	g.inputChan = make(chan []byte, inputChanLength)
	g.outputChan = make(chan []byte, outputChanLength)
	g.firstTag = true
	return
}

// Start begins the generation routine; if raw data is given to the input
// channel, flv tags will be produced and made available on the output channel.
func (g *flvGenerator) Start() {
	go g.generate()
}

// GenHeader generates the flv header and sends it down the output channel.
// This will generally be called once at the start of file writing/transmission.
func (g *flvGenerator) GenHeader() {
	header := flv.Header{
		AudioFlag: g.audioFlag,
		VideoFlag: g.videoFlag,
	}
	g.outputChan <- header.ToByteSlice()
}

// getNextTimestamp returns the next timestamp in milliseconds, measured as the
// time elapsed since the first tag was generated.
func (g *flvGenerator) getNextTimestamp() (timestamp uint32) {
	if g.firstTag {
		g.startTime = time.Now()
		g.firstTag = false
		timestamp = 0
		return
	}
	timestamp = uint32(time.Since(g.startTime).Seconds() * 1000)
	return
}

// ResetTimestamp resets the current timestamp to 0, i.e. equivalent to the
// start of transmission.
func (g *flvGenerator) ResetTimestamp() {
	g.currentTimestamp = 0
}

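// isKeyFrame scans frame for H.264 Annex-B start codes and reports whether a
// key (IDR) frame NAL unit is found before any inter frame NAL unit. NAL units
// of type sequenceCode are also treated as key frames.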
func isKeyFrame(frame []byte) bool {
	// Load the frame into a buffered channel so that bytes can be consumed
	// one at a time while scanning.
	byteChannel := make(chan byte, len(frame))
	for i := range frame {
		byteChannel <- frame[i]
	}
	for len(byteChannel) >= 5 {
		aByte := <-byteChannel
		// Look for an Annex-B start code (0x000001 or 0x00000001).
		for i := 1; aByte == 0x00 && i != 4; i++ {
			aByte = <-byteChannel
			if (aByte == 0x01 && i == 2) || (aByte == 0x01 && i == 3) {
				// The byte following the start code carries the NAL unit type
				// in its lower five bits.
				aByte = <-byteChannel
				nalType := aByte & 0x1F
				switch nalType {
				case interFrameCode:
					return false
				case keyFrameCode:
					return true
				case sequenceCode:
					return true
				}
			}
		}
	}
	return false
}

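// isSequenceHeader scans frame for H.264 Annex-B start codes and reports
// whether sequence/parameter data (SEI, SPS or PPS) is found before any coded
// slice NAL unit.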
func isSequenceHeader(frame []byte) bool {
	// Load the frame into a buffered channel so that bytes can be consumed
	// one at a time while scanning.
	byteChannel := make(chan byte, len(frame))
	for i := range frame {
		byteChannel <- frame[i]
	}
	for len(byteChannel) >= 5 {
		aByte := <-byteChannel
		// Look for an Annex-B start code (0x000001 or 0x00000001).
		for i := 1; aByte == 0x00 && i != 4; i++ {
			aByte = <-byteChannel
			if (aByte == 0x01 && i == 2) || (aByte == 0x01 && i == 3) {
				// The byte following the start code carries the NAL unit type
				// in its lower five bits.
				aByte = <-byteChannel
				nalType := aByte & 0x1F
				switch nalType {
				case interFrameCode:
					return false
				case keyFrameCode:
					return false
				case sequenceCode:
					return true
				case 7: // Sequence parameter set (SPS).
					return true
				case 8: // Picture parameter set (PPS).
					return true
				}
			}
		}
	}
	return false
}

// generate takes in raw video data from the input channel and packetises it
// into flv tags, which are then passed to the output channel.
func (g *flvGenerator) generate() {
	g.GenHeader()
	var frameType byte
	var packetType byte
	for {
		select {
		case videoFrame := <-g.inputChan:
			if isKeyFrame(videoFrame) {
				frameType = flv.KeyFrameType
			} else {
				frameType = flv.InterFrameType
			}
			if isSequenceHeader(videoFrame) {
				packetType = flv.SequenceHeader
			} else {
				packetType = flv.AVCNALU
			}
			timeStamp := g.getNextTimestamp()
			// Do we have video to send off?
			if g.videoFlag {
				tag := flv.VideoTag{
					TagType:           uint8(flv.VideoTagType),
					DataSize:          uint32(len(videoFrame)) + flv.DataHeaderLength,
					Timestamp:         timeStamp,
					TimestampExtended: flv.NoTimestampExtension,
					FrameType:         frameType,
					Codec:             flv.H264,
					PacketType:        packetType,
					CompositionTime:   0,
					Data:              videoFrame,
					PrevTagSize:       uint32(videoHeaderSize + len(videoFrame)),
				}
				g.outputChan <- tag.ToByteSlice()
			}
			// Do we even have some audio to send off?
			if g.audioFlag {
				// Not sure why, but we need two audio tags for the dummy silent audio.
				tag := flv.AudioTag{
					TagType:           uint8(flv.AudioTagType),
					DataSize:          7,
					Timestamp:         timeStamp,
					TimestampExtended: flv.NoTimestampExtension,
					SoundFormat:       flv.AACAudioFormat,
					SoundRate:         3,
					SoundSize:         true,
					SoundType:         true,
					Data:              dummyAudioTag1Data,
					PrevTagSize:       uint32(audioSize),
				}
				g.outputChan <- tag.ToByteSlice()

				tag = flv.AudioTag{
					TagType:           uint8(flv.AudioTagType),
					DataSize:          21,
					Timestamp:         timeStamp,
					TimestampExtended: flv.NoTimestampExtension,
					SoundFormat:       flv.AACAudioFormat,
					SoundRate:         3,
					SoundSize:         true,
					SoundType:         true,
					Data:              dummyAudioTag2Data,
					PrevTagSize:       uint32(22),
				}
				g.outputChan <- tag.ToByteSlice()
			}
		}
	}
}