codec/h265: using bytes.Buffer instead of custom solution

This commit is contained in:
Saxon 2019-05-02 00:48:57 +09:30
parent 659be05a1f
commit 85ece7d1ed
2 changed files with 46 additions and 33 deletions

View File

@ -3,7 +3,7 @@ NAME
lex.go lex.go
DESCRIPTION DESCRIPTION
lex.go provides a lexer for taking h265 rtp format and lexing into access units. lex.go provides a lexer for taking RTP HEVC (H265) and lexing into access units.
AUTHORS AUTHORS
Saxon A. Nelson-Milton <saxon@ausocean.org> Saxon A. Nelson-Milton <saxon@ausocean.org>
@ -28,6 +28,7 @@ LICENSE
package h265 package h265
import ( import (
"bytes"
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"io" "io"
@ -51,10 +52,16 @@ const (
// Lexer is an H265 lexer. // Lexer is an H265 lexer.
type Lexer struct { type Lexer struct {
UsingDON bool // Indicates whether DONL and DOND will be used for in RTP stream. donl bool // Indicates whether DONL and DOND will be used for the RTP stream.
buf [maxAUSize]byte // Holds current access unit. buf *bytes.Buffer // Holds the current access unit.
off int // Holds offset into buf that we're occupying with access unit. frag bool // Indicates if we're currently dealing with a fragmentation packet.
fragmented bool // Indicates if we're currently dealing with a fragmentation unit. }
// NewLexer returns a pointer to a new Lexer.
func NewLexer(donl bool) *Lexer {
return &Lexer{
donl: donl,
buf: bytes.NewBuffer(make([]byte, 0, maxAUSize))}
} }
// Lex continually reads RTP packets from the io.Reader src and lexes into // Lex continually reads RTP packets from the io.Reader src and lexes into
@ -80,10 +87,10 @@ func (l *Lexer) Lex(dst io.Writer, src io.Reader, delay time.Duration) error {
nalType := (payload[0] >> 1) & 0x3f nalType := (payload[0] >> 1) & 0x3f
// If not currently fragmented then we ignore current write // If not currently fragmented then we ignore current write.
if l.fragmented && nalType != typeFragmentation { if l.frag && nalType != typeFragmentation {
l.off = 0 l.buf.Reset()
l.fragmented = false l.frag = false
continue continue
} }
@ -95,7 +102,7 @@ func (l *Lexer) Lex(dst io.Writer, src io.Reader, delay time.Duration) error {
case typePACI: case typePACI:
l.handlePACI(payload) l.handlePACI(payload)
default: default:
l.write(payload) l.writeWithPrefix(payload)
} }
m, err := rtp.Marker(buf[:n]) m, err := rtp.Marker(buf[:n])
@ -104,11 +111,11 @@ func (l *Lexer) Lex(dst io.Writer, src io.Reader, delay time.Duration) error {
} }
if m { if m {
_, err := dst.Write(l.buf[:l.off]) _, err := l.buf.WriteTo(dst)
if err != nil { if err != nil {
// TODO: work out what to do here. // TODO: work out what to do here.
} }
l.off = 0 l.buf.Reset()
} }
} }
return nil return nil
@ -119,7 +126,7 @@ func (l *Lexer) Lex(dst io.Writer, src io.Reader, delay time.Duration) error {
func (l *Lexer) handleAggregation(d []byte) { func (l *Lexer) handleAggregation(d []byte) {
idx := 2 idx := 2
for idx < len(d) { for idx < len(d) {
if l.UsingDON { if l.donl {
switch idx { switch idx {
case 2: case 2:
idx += 2 idx += 2
@ -131,7 +138,7 @@ func (l *Lexer) handleAggregation(d []byte) {
idx += 2 idx += 2
nalu := d[idx : idx+size] nalu := d[idx : idx+size]
idx += size idx += size
l.write(nalu) l.writeWithPrefix(nalu)
} }
} }
@ -139,20 +146,22 @@ func (l *Lexer) handleAggregation(d []byte) {
// them to the Lexer's buf. // them to the Lexer's buf.
func (l *Lexer) handleFragmentation(d []byte) { func (l *Lexer) handleFragmentation(d []byte) {
_d := d[3:] _d := d[3:]
if l.UsingDON { if l.donl {
_d = d[5:] _d = d[5:]
} }
s := d[2]&0x80 != 0 // Get start and end indicators from FU header.
e := d[2]&0x40 != 0 start := d[2]&0x80 != 0
end := d[2]&0x40 != 0
switch { switch {
case s && !e: case start && !end:
l.fragmented = true l.frag = true
l.write(_d) l.writeWithPrefix(_d)
case !s && e: case !start && end:
l.fragmented = false l.frag = false
fallthrough fallthrough
case !s && !e: case !start && !end:
l.writeNoPrefix(_d) l.writeNoPrefix(_d)
default: default:
panic("bad fragmentation packet") panic("bad fragmentation packet")
@ -168,16 +177,14 @@ func (l *Lexer) handlePACI(d []byte) {
// write writes a NAL unit to the Lexer's buf in byte stream format using the // write writes a NAL unit to the Lexer's buf in byte stream format using the
// start code. // start code.
func (l *Lexer) write(d []byte) { func (l *Lexer) writeWithPrefix(d []byte) {
const prefix = "\x00\x00\x00\x01" const prefix = "\x00\x00\x00\x01"
copy(l.buf[l.off:], []byte(prefix)) l.buf.Write([]byte(prefix))
copy(l.buf[l.off+4:], d) l.buf.Write(d)
l.off += len(d) + 4
} }
// writeNoPrefix writes data to the Lexer's buf. This is used for non start // writeNoPrefix writes data to the Lexer's buf. This is used for non start
// fragmentations of a NALU. // fragmentations of a NALU.
func (l *Lexer) writeNoPrefix(d []byte) { func (l *Lexer) writeNoPrefix(d []byte) {
copy(l.buf[l.off:], d) l.buf.Write(d)
l.off += len(d)
} }

View File

@ -32,11 +32,13 @@ import (
"testing" "testing"
) )
// rtpReader provides the RTP stream.
type rtpReader struct { type rtpReader struct {
packets [][]byte packets [][]byte
idx int idx int
} }
// Read implements io.Reader.
func (r *rtpReader) Read(p []byte) (int, error) { func (r *rtpReader) Read(p []byte) (int, error) {
if r.idx == len(r.packets) { if r.idx == len(r.packets) {
return 0, io.EOF return 0, io.EOF
@ -47,8 +49,10 @@ func (r *rtpReader) Read(p []byte) (int, error) {
return len(b), nil return len(b), nil
} }
// destination holds the access units extracted during the lexing process.
type destination [][]byte type destination [][]byte
// Write implements io.Writer.
func (d *destination) Write(p []byte) (int, error) { func (d *destination) Write(p []byte) (int, error) {
t := make([]byte, len(p)) t := make([]byte, len(p))
copy(t, p) copy(t, p)
@ -56,16 +60,18 @@ func (d *destination) Write(p []byte) (int, error) {
return len(p), nil return len(p), nil
} }
// TestLex checks that the Lexer can correctly extract H265 access units from
// an HEVC RTP stream in RTP payload format.
func TestLex(t *testing.T) { func TestLex(t *testing.T) {
const rtpVer = 2 const rtpVer = 2
tests := []struct { tests := []struct {
don bool donl bool
packets [][]byte packets [][]byte
expect [][]byte expect [][]byte
}{ }{
{ {
don: false, donl: false,
packets: [][]byte{ packets: [][]byte{
{ // Single NAL unit. { // Single NAL unit.
0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // RTP header. 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // RTP header.
@ -143,7 +149,7 @@ func TestLex(t *testing.T) {
}, },
}, },
{ {
don: true, donl: true,
packets: [][]byte{ packets: [][]byte{
{ // Single NAL unit. { // Single NAL unit.
0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // RTP header. 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // RTP header.
@ -236,7 +242,7 @@ func TestLex(t *testing.T) {
for testNum, test := range tests { for testNum, test := range tests {
r := &rtpReader{packets: test.packets} r := &rtpReader{packets: test.packets}
d := &destination{} d := &destination{}
err := (&Lexer{UsingDON: test.don}).Lex(d, r, 0) err := NewLexer(test.donl).Lex(d, r, 0)
if err != nil { if err != nil {
t.Fatalf("error lexing: %v\n", err) t.Fatalf("error lexing: %v\n", err)
} }