mirror of https://bitbucket.org/ausocean/av.git

codec/h265: using bytes.Buffer instead of custom solution

commit 85ece7d1ed (parent 659be05a1f)
codec/h265/lex.go

@@ -3,7 +3,7 @@ NAME
   lex.go
 
 DESCRIPTION
-  lex.go provides a lexer for taking h265 rtp format and lexing into access units.
+  lex.go provides a lexer for taking an RTP HEVC (H265) stream and lexing it into access units.
 
 AUTHORS
   Saxon A. Nelson-Milton <saxon@ausocean.org>
@@ -28,6 +28,7 @@ LICENSE
 package h265
 
 import (
+	"bytes"
 	"encoding/binary"
 	"fmt"
 	"io"
@@ -51,10 +52,16 @@ const (
 
 // Lexer is an H265 lexer.
 type Lexer struct {
-	UsingDON   bool            // Indicates whether DONL and DOND will be used for in RTP stream.
-	buf        [maxAUSize]byte // Holds current access unit.
-	off        int             // Holds offset into buf that we're occupying with access unit.
-	fragmented bool            // Indicates if we're currently dealing with a fragmentation unit.
+	donl bool          // Indicates whether DONL and DOND will be used for the RTP stream.
+	buf  *bytes.Buffer // Holds the current access unit.
+	frag bool          // Indicates if we're currently dealing with a fragmentation packet.
 }
 
+// NewLexer returns a pointer to a new Lexer.
+func NewLexer(donl bool) *Lexer {
+	return &Lexer{
+		donl: donl,
+		buf:  bytes.NewBuffer(make([]byte, 0, maxAUSize))}
+}
+
 // Lex continually reads RTP packets from the io.Reader src and lexes into
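The core of the change is visible in this hunk: the fixed `[maxAUSize]byte` array and hand-maintained `off` index give way to a `*bytes.Buffer` whose backing slice is preallocated to the same capacity, so length bookkeeping disappears without giving up the no-reallocation behaviour. A minimal sketch of the two styles side by side (the `maxAUSize` value here is a placeholder; the real constant is defined elsewhere in lex.go):

```go
package main

import (
	"bytes"
	"fmt"
)

const maxAUSize = 100000 // Placeholder; the real value lives in lex.go.

func main() {
	data := []byte{0x40, 0x01, 0x0c}

	// Before: every write must copy into the array and advance the offset.
	var arr [maxAUSize]byte
	off := 0
	copy(arr[off:], data)
	off += len(data)
	fmt.Println(arr[:off])

	// After: bytes.Buffer tracks its own length, and preallocating the
	// backing slice avoids reallocation for units up to maxAUSize.
	buf := bytes.NewBuffer(make([]byte, 0, maxAUSize))
	buf.Write(data)
	fmt.Println(buf.Bytes())
}
```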
@@ -80,10 +87,10 @@ func (l *Lexer) Lex(dst io.Writer, src io.Reader, delay time.Duration) error {
 
 		nalType := (payload[0] >> 1) & 0x3f
 
-		// If not currently fragmented then we ignore current write
-		if l.fragmented && nalType != typeFragmentation {
-			l.off = 0
-			l.fragmented = false
+		// If we're mid fragmentation but this isn't a fragmentation packet, discard the incomplete unit.
+		if l.frag && nalType != typeFragmentation {
+			l.buf.Reset()
+			l.frag = false
 			continue
 		}
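Discarding a half-assembled access unit is now a single `Reset` call rather than zeroing an offset. `Reset` empties the buffer but keeps its allocated capacity, so the discard stays cheap; a tiny sketch to confirm that behaviour:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	b := bytes.NewBuffer(make([]byte, 0, 64))
	b.Write([]byte("partial fragment"))
	b.Reset()                     // Drop the contents...
	fmt.Println(b.Len(), b.Cap()) // 0 64 — ...but keep the capacity.
}
```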
@@ -95,7 +102,7 @@ func (l *Lexer) Lex(dst io.Writer, src io.Reader, delay time.Duration) error {
 		case typePACI:
 			l.handlePACI(payload)
 		default:
-			l.write(payload)
+			l.writeWithPrefix(payload)
 		}
 
 		m, err := rtp.Marker(buf[:n])
@@ -104,11 +111,11 @@ func (l *Lexer) Lex(dst io.Writer, src io.Reader, delay time.Duration) error {
 		}
 
 		if m {
-			_, err := dst.Write(l.buf[:l.off])
+			_, err := l.buf.WriteTo(dst)
 			if err != nil {
 				// TODO: work out what to do here.
 			}
-			l.off = 0
+			l.buf.Reset()
 		}
 	}
 	return nil
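When the RTP marker bit signals the end of an access unit, `WriteTo` moves the buffered bytes straight to the destination. Note that `bytes.Buffer.WriteTo` drains the buffer as it writes, so the `Reset` that follows is a safety net (useful if `WriteTo` stops early on error) rather than a required step. A quick demonstration of the draining behaviour:

```go
package main

import (
	"bytes"
	"fmt"
	"os"
)

func main() {
	b := bytes.NewBuffer([]byte("access unit bytes\n"))
	n, err := b.WriteTo(os.Stdout) // Writes and consumes the contents.
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	fmt.Println(n, b.Len()) // 18 0 — the buffer is empty afterwards.
}
```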
@@ -119,7 +126,7 @@ func (l *Lexer) Lex(dst io.Writer, src io.Reader, delay time.Duration) error {
 func (l *Lexer) handleAggregation(d []byte) {
 	idx := 2
 	for idx < len(d) {
-		if l.UsingDON {
+		if l.donl {
 			switch idx {
 			case 2:
 				idx += 2
@@ -131,7 +138,7 @@ func (l *Lexer) handleAggregation(d []byte) {
 		idx += 2
 		nalu := d[idx : idx+size]
 		idx += size
-		l.write(nalu)
+		l.writeWithPrefix(nalu)
 	}
 }
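For context, an HEVC aggregation packet (RFC 7798) packs several NAL units into one payload: a 2-byte payload header, an optional DONL, then repeated 2-byte size fields each followed by a NAL unit, with a 1-byte DOND before each unit after the first when DON is in use. A standalone sketch of the walk that `handleAggregation` performs, simplified and independent of the lexer's buffering:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// nalus walks an aggregation packet payload and returns the NAL units,
// assuming the RFC 7798 layout described above.
func nalus(d []byte, donl bool) [][]byte {
	idx := 2 // Skip the 2-byte payload header.
	if donl {
		idx += 2 // Skip the 2-byte DONL before the first unit.
	}
	var out [][]byte
	for idx+2 <= len(d) {
		size := int(binary.BigEndian.Uint16(d[idx:]))
		idx += 2
		out = append(out, d[idx:idx+size])
		idx += size
		if donl {
			idx++ // Skip the 1-byte DOND before each subsequent unit.
		}
	}
	return out
}

func main() {
	// Two tiny fake NAL units of sizes 2 and 3, no DONL.
	p := []byte{0x60, 0x01, 0x00, 0x02, 0xaa, 0xbb, 0x00, 0x03, 0x01, 0x02, 0x03}
	fmt.Println(nalus(p, false)) // [[170 187] [1 2 3]]
}
```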
@@ -139,20 +146,22 @@ func (l *Lexer) handleAggregation(d []byte) {
 // them to the Lexer's buf.
 func (l *Lexer) handleFragmentation(d []byte) {
 	_d := d[3:]
-	if l.UsingDON {
+	if l.donl {
 		_d = d[5:]
 	}
 
-	s := d[2]&0x80 != 0
-	e := d[2]&0x40 != 0
+	// Get start and end indicators from the FU header.
+	start := d[2]&0x80 != 0
+	end := d[2]&0x40 != 0
 
 	switch {
-	case s && !e:
-		l.fragmented = true
-		l.write(_d)
-	case !s && e:
-		l.fragmented = false
+	case start && !end:
+		l.frag = true
+		l.writeWithPrefix(_d)
+	case !start && end:
+		l.frag = false
 		fallthrough
-	case !s && !e:
+	case !start && !end:
 		l.writeNoPrefix(_d)
 	default:
 		panic("bad fragmentation packet")
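The FU header is the third byte of the payload: bit 7 is the start indicator, bit 6 the end indicator, and the low six bits carry the original NAL unit type. A minimal sketch of the masking used above:

```go
package main

import "fmt"

func main() {
	fu := byte(0x93) // 1_0_010011: start bit set, end bit clear, FU type 19.
	start := fu&0x80 != 0
	end := fu&0x40 != 0
	fuType := fu & 0x3f
	fmt.Println(start, end, fuType) // true false 19
}
```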
@@ -168,16 +177,14 @@ func (l *Lexer) handlePACI(d []byte) {
 
 // write writes a NAL unit to the Lexer's buf in byte stream format using the
 // start code.
-func (l *Lexer) write(d []byte) {
+func (l *Lexer) writeWithPrefix(d []byte) {
 	const prefix = "\x00\x00\x00\x01"
-	copy(l.buf[l.off:], []byte(prefix))
-	copy(l.buf[l.off+4:], d)
-	l.off += len(d) + 4
+	l.buf.Write([]byte(prefix))
+	l.buf.Write(d)
 }
 
 // writeNoPrefix writes data to the Lexer's buf. This is used for non start
 // fragmentations of a NALU.
 func (l *Lexer) writeNoPrefix(d []byte) {
-	copy(l.buf[l.off:], d)
-	l.off += len(d)
+	l.buf.Write(d)
 }
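These writers produce Annex-B byte-stream output: each complete NAL unit is preceded by the 4-byte start code 00 00 00 01, while continuation fragments are appended raw. With `bytes.Buffer`, the prefix becomes just another `Write` instead of two `copy` calls and offset arithmetic. A self-contained sketch of the prefixed write:

```go
package main

import (
	"bytes"
	"fmt"
)

// writeWithPrefix mirrors the lexer's method: emit the 4-byte Annex-B start
// code, then the NAL unit itself.
func writeWithPrefix(buf *bytes.Buffer, nalu []byte) {
	buf.Write([]byte("\x00\x00\x00\x01"))
	buf.Write(nalu)
}

func main() {
	var buf bytes.Buffer
	writeWithPrefix(&buf, []byte{0x40, 0x01}) // e.g. a VPS NAL header.
	fmt.Printf("% x\n", buf.Bytes())          // 00 00 00 01 40 01
}
```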
codec/h265/lex_test.go

@@ -32,11 +32,13 @@ import (
 	"testing"
 )
 
+// rtpReader provides the RTP stream.
 type rtpReader struct {
 	packets [][]byte
 	idx     int
 }
 
+// Read implements io.Reader.
 func (r *rtpReader) Read(p []byte) (int, error) {
 	if r.idx == len(r.packets) {
 		return 0, io.EOF
@@ -47,8 +49,10 @@ func (r *rtpReader) Read(p []byte) (int, error) {
 	return len(b), nil
 }
 
+// destination holds the access units extracted during the lexing process.
 type destination [][]byte
 
+// Write implements io.Writer.
 func (d *destination) Write(p []byte) (int, error) {
 	t := make([]byte, len(p))
 	copy(t, p)
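`destination` copies each incoming slice before storing it because an `io.Writer` must not retain `p`: the caller may reuse that memory after `Write` returns. A small sketch showing the copy-per-write pattern in isolation:

```go
package main

import "fmt"

// recorder stores one copied slice per Write call, mirroring the test's
// destination type.
type recorder [][]byte

func (r *recorder) Write(p []byte) (int, error) {
	t := make([]byte, len(p))
	copy(t, p) // Copy, since the caller may reuse p after we return.
	*r = append(*r, t)
	return len(p), nil
}

func main() {
	var r recorder
	buf := []byte{1, 2, 3}
	r.Write(buf)
	buf[0] = 9 // Mutating the caller's buffer must not affect stored data.
	r.Write(buf)
	fmt.Println(r) // [[1 2 3] [9 2 3]]
}
```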
@@ -56,16 +60,18 @@ func (d *destination) Write(p []byte) (int, error) {
 	return len(p), nil
 }
 
+// TestLex checks that the Lexer can correctly extract H265 access units from
+// an HEVC RTP stream in RTP payload format.
 func TestLex(t *testing.T) {
 	const rtpVer = 2
 
 	tests := []struct {
-		don     bool
+		donl    bool
 		packets [][]byte
 		expect  [][]byte
 	}{
 		{
-			don: false,
+			donl: false,
 			packets: [][]byte{
 				{ // Single NAL unit.
 					0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // RTP header.
@@ -143,7 +149,7 @@ func TestLex(t *testing.T) {
 			},
 		},
 		{
-			don: true,
+			donl: true,
 			packets: [][]byte{
 				{ // Single NAL unit.
 					0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // RTP header.
@@ -236,7 +242,7 @@ func TestLex(t *testing.T) {
 	for testNum, test := range tests {
 		r := &rtpReader{packets: test.packets}
 		d := &destination{}
-		err := (&Lexer{UsingDON: test.don}).Lex(d, r, 0)
+		err := NewLexer(test.donl).Lex(d, r, 0)
 		if err != nil {
 			t.Fatalf("error lexing: %v\n", err)
 		}
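With the constructor in place, callers no longer build a `Lexer` literal by hand. A hypothetical end-to-end caller might look like the following sketch (the import path is assumed from the mirror URL above, and stdin stands in for a real RTP source):

```go
package main

import (
	"bufio"
	"os"

	// Assumed import path, inferred from the mirrored repository URL.
	"bitbucket.org/ausocean/av/codec/h265"
)

func main() {
	lex := h265.NewLexer(false) // false: the stream carries no DONL/DOND fields.
	dst := bufio.NewWriter(os.Stdout)
	defer dst.Flush()
	// Read RTP packets from stdin and write Annex-B access units to stdout,
	// with no artificial delay between reads.
	if err := lex.Lex(dst, os.Stdin, 0); err != nil {
		os.Exit(1)
	}
}
```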