Merged in h265-lex-to-extract (pull request #392)

codec/h265: rename Lexer to Extractor

Approved-by: Trek Hopton <trek.hopton@gmail.com>
This commit is contained in:
Saxon Milton 2020-03-29 00:29:47 +00:00
commit 09a763728e
3 changed files with 52 additions and 50 deletions

View File

@ -1,9 +1,10 @@
/* /*
NAME NAME
lex.go extract.go
DESCRIPTION DESCRIPTION
lex.go provides a lexer for taking RTP HEVC (H265) and lexing into access units. extract.go provides an extractor for taking RTP HEVC (H265) and extracting
access units.
AUTHORS AUTHORS
Saxon A. Nelson-Milton <saxon@ausocean.org> Saxon A. Nelson-Milton <saxon@ausocean.org>
@ -25,7 +26,7 @@ LICENSE
in gpl.txt. If not, see http://www.gnu.org/licenses. in gpl.txt. If not, see http://www.gnu.org/licenses.
*/ */
// Package h265 provides an RTP h265 lexer that can extract h265 access units // Package h265 provides an RTP h265 extractor that can extract h265 access units
// from an RTP stream. // from an RTP stream.
package h265 package h265
@ -52,25 +53,25 @@ const (
maxRTPSize = 4096 maxRTPSize = 4096
) )
// Lexer is an H265 lexer. // Extractor is an RTP HEVC access unit extractor.
type Lexer struct { type Extractor struct {
donl bool // Indicates whether DONL and DOND will be used for the RTP stream. donl bool // Indicates whether DONL and DOND will be used for the RTP stream.
buf *bytes.Buffer // Holds the current access unit. buf *bytes.Buffer // Holds the current access unit.
frag bool // Indicates if we're currently dealing with a fragmentation packet. frag bool // Indicates if we're currently dealing with a fragmentation packet.
} }
// NewLexer returns a new Lexer. // NewExtractor returns a new Extractor.
func NewLexer(donl bool) *Lexer { func NewExtractor(donl bool) *Extractor {
return &Lexer{ return &Extractor{
donl: donl, donl: donl,
buf: bytes.NewBuffer(make([]byte, 0, maxAUSize)), buf: bytes.NewBuffer(make([]byte, 0, maxAUSize)),
} }
} }
// Lex continually reads RTP packets from the io.Reader src and lexes into // Extract continually reads RTP packets from the io.Reader src and extracts
// access units which are written to the io.Writer dst. Lex expects that for // H.265 access units which are written to the io.Writer dst. Extract expects
// each read from src, a single RTP packet is received. // that for each read from src, a single RTP packet is received.
func (l *Lexer) Lex(dst io.Writer, src io.Reader, delay time.Duration) error { func (e *Extractor) Extract(dst io.Writer, src io.Reader, delay time.Duration) error {
buf := make([]byte, maxRTPSize) buf := make([]byte, maxRTPSize)
for { for {
n, err := src.Read(buf) n, err := src.Read(buf)
@ -78,7 +79,7 @@ func (l *Lexer) Lex(dst io.Writer, src io.Reader, delay time.Duration) error {
case nil: // Do nothing. case nil: // Do nothing.
default: default:
if err == io.EOF { if err == io.EOF {
if l.buf.Len() == 0 { if e.buf.Len() == 0 {
return io.EOF return io.EOF
} }
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
@ -94,21 +95,21 @@ func (l *Lexer) Lex(dst io.Writer, src io.Reader, delay time.Duration) error {
nalType := (payload[0] >> 1) & 0x3f nalType := (payload[0] >> 1) & 0x3f
// If not currently fragmented then we ignore current write. // If not currently fragmented then we ignore current write.
if l.frag && nalType != typeFragmentation { if e.frag && nalType != typeFragmentation {
l.buf.Reset() e.buf.Reset()
l.frag = false e.frag = false
continue continue
} }
switch nalType { switch nalType {
case typeAggregation: case typeAggregation:
l.handleAggregation(payload) e.handleAggregation(payload)
case typeFragmentation: case typeFragmentation:
l.handleFragmentation(payload) e.handleFragmentation(payload)
case typePACI: case typePACI:
l.handlePACI(payload) e.handlePACI(payload)
default: default:
l.writeWithPrefix(payload) e.writeWithPrefix(payload)
} }
markerIsSet, err := rtp.Marker(buf[:n]) markerIsSet, err := rtp.Marker(buf[:n])
@ -117,22 +118,22 @@ func (l *Lexer) Lex(dst io.Writer, src io.Reader, delay time.Duration) error {
} }
if markerIsSet { if markerIsSet {
_, err := l.buf.WriteTo(dst) _, err := e.buf.WriteTo(dst)
if err != nil { if err != nil {
// TODO: work out what to do here. // TODO: work out what to do here.
} }
l.buf.Reset() e.buf.Reset()
} }
} }
return nil return nil
} }
// handleAggregation parses NAL units from an aggregation packet and writes // handleAggregation parses NAL units from an aggregation packet and writes
// them to the Lexers buffer buf. // them to the Extractor's buffer buf.
func (l *Lexer) handleAggregation(d []byte) { func (e *Extractor) handleAggregation(d []byte) {
idx := 2 idx := 2
for idx < len(d) { for idx < len(d) {
if l.donl { if e.donl {
switch idx { switch idx {
case 2: case 2:
idx += 2 idx += 2
@ -144,13 +145,13 @@ func (l *Lexer) handleAggregation(d []byte) {
idx += 2 idx += 2
nalu := d[idx : idx+size] nalu := d[idx : idx+size]
idx += size idx += size
l.writeWithPrefix(nalu) e.writeWithPrefix(nalu)
} }
} }
// handleFragmentation parses NAL units from fragmentation packets and writes // handleFragmentation parses NAL units from fragmentation packets and writes
// them to the Lexer's buf. // them to the Extractor's buf.
func (l *Lexer) handleFragmentation(d []byte) { func (e *Extractor) handleFragmentation(d []byte) {
// Get start and end indicators from FU header. // Get start and end indicators from FU header.
start := d[2]&0x80 != 0 start := d[2]&0x80 != 0
end := d[2]&0x40 != 0 end := d[2]&0x40 != 0
@ -159,27 +160,27 @@ func (l *Lexer) handleFragmentation(d []byte) {
b2 := d[1] b2 := d[1]
if start { if start {
d = d[1:] d = d[1:]
if l.donl { if e.donl {
d = d[2:] d = d[2:]
} }
d[0] = b1 d[0] = b1
d[1] = b2 d[1] = b2
} else { } else {
d = d[3:] d = d[3:]
if l.donl { if e.donl {
d = d[2:] d = d[2:]
} }
} }
switch { switch {
case start && !end: case start && !end:
l.frag = true e.frag = true
l.writeWithPrefix(d) e.writeWithPrefix(d)
case !start && end: case !start && end:
l.frag = false e.frag = false
fallthrough fallthrough
case !start && !end: case !start && !end:
l.writeNoPrefix(d) e.writeNoPrefix(d)
default: default:
panic("bad fragmentation packet") panic("bad fragmentation packet")
} }
@ -188,20 +189,20 @@ func (l *Lexer) handleFragmentation(d []byte) {
// handlePACI will handle PACI packets // handlePACI will handle PACI packets
// //
// TODO: complete this // TODO: complete this
func (l *Lexer) handlePACI(d []byte) { func (e *Extractor) handlePACI(d []byte) {
panic("unsupported nal type") panic("unsupported nal type")
} }
// write writes a NAL unit to the Lexer's buf in byte stream format using the // write writes a NAL unit to the Extractor's buf in byte stream format using the
// start code. // start code.
func (l *Lexer) writeWithPrefix(d []byte) { func (e *Extractor) writeWithPrefix(d []byte) {
const prefix = "\x00\x00\x00\x01" const prefix = "\x00\x00\x00\x01"
l.buf.Write([]byte(prefix)) e.buf.Write([]byte(prefix))
l.buf.Write(d) e.buf.Write(d)
} }
// writeNoPrefix writes data to the Lexer's buf. This is used for non start // writeNoPrefix writes data to the Extractor's buf. This is used for non start
// fragmentations of a NALU. // fragmentations of a NALU.
func (l *Lexer) writeNoPrefix(d []byte) { func (e *Extractor) writeNoPrefix(d []byte) {
l.buf.Write(d) e.buf.Write(d)
} }

View File

@ -1,9 +1,10 @@
/* /*
NAME NAME
lex_test.go extract_test.go
DESCRIPTION DESCRIPTION
lex_test.go provides tests to check validity of the Lexer found in lex.go. extract_test.go provides tests to check validity of the Extractor found in
extract.go.
AUTHORS AUTHORS
Saxon A. Nelson-Milton <saxon@ausocean.org> Saxon A. Nelson-Milton <saxon@ausocean.org>
@ -53,7 +54,7 @@ func (r *rtpReader) Read(p []byte) (int, error) {
return n, nil return n, nil
} }
// destination holds the access units extracted during the lexing process. // destination holds the access units extracted during the extraction process.
type destination [][]byte type destination [][]byte
// Write implements io.Writer. // Write implements io.Writer.
@ -64,7 +65,7 @@ func (d *destination) Write(p []byte) (int, error) {
return len(p), nil return len(p), nil
} }
// TestLex checks that the Lexer can correctly extract H265 access units from // TestLex checks that the Extractor can correctly extract H265 access units from
// HEVC RTP stream in RTP payload format. // HEVC RTP stream in RTP payload format.
func TestLex(t *testing.T) { func TestLex(t *testing.T) {
const rtpVer = 2 const rtpVer = 2
@ -246,11 +247,11 @@ func TestLex(t *testing.T) {
for testNum, test := range tests { for testNum, test := range tests {
r := &rtpReader{packets: test.packets} r := &rtpReader{packets: test.packets}
d := &destination{} d := &destination{}
err := NewLexer(test.donl).Lex(d, r, 0) err := NewExtractor(test.donl).Extract(d, r, 0)
switch err { switch err {
case nil, io.EOF: // Do nothing case nil, io.EOF: // Do nothing
default: default:
t.Fatalf("error lexing: %v\n", err) t.Fatalf("error extracting: %v\n", err)
} }
for i, accessUnit := range test.expect { for i, accessUnit := range test.expect {

View File

@ -417,7 +417,7 @@ func (r *Revid) setLexer(c uint8, isRTSP bool) {
} }
case codecutil.H265: case codecutil.H265:
r.cfg.Logger.Log(logger.Debug, "using H.265 codec") r.cfg.Logger.Log(logger.Debug, "using H.265 codec")
r.lexTo = h265.NewLexer(false).Lex r.lexTo = h265.NewExtractor(false).Extract
if !isRTSP { if !isRTSP {
panic("byte stream H.265 lexing not implemented") panic("byte stream H.265 lexing not implemented")
} }