/*
NAME
  lex.go

DESCRIPTION
  lex.go provides a lexer to lex h264 bytestream into access units.

AUTHOR
  Dan Kortschak

LICENSE
  lex.go is Copyright (C) 2017 the Australian Ocean Lab (AusOcean)

  It is free software: you can redistribute it and/or modify it under
  the terms of the GNU General Public License as published by the
  Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  It is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
  License for more details.

  You should have received a copy of the GNU General Public License
  along with revid in gpl.txt. If not, see http://www.gnu.org/licenses.
*/

// lex.go provides a lexer to lex h264 bytestream into access units.
package h264

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"time"

	"bitbucket.org/ausocean/av/codec/codecutil"
	"bitbucket.org/ausocean/av/protocol/rtp"
)

// NAL types.
const (
	// Single NAL unit type bounds.
	typeSingleNALULowBound  = 1
	typeSingleNALUHighBound = 23

	// Single-time aggregation packets.
	typeSTAPA = 24
	typeSTAPB = 25

	// Multi-time aggregation packets.
	typeMTAP16 = 26
	typeMTAP24 = 27

	// Fragmentation packets.
	typeFUA = 28
	typeFUB = 29
)

var noDelay = make(chan time.Time)

func init() {
	close(noDelay)
}

var h264Prefix = [...]byte{0x00, 0x00, 0x01, 0x09, 0xf0}

// LexFromBytestream lexes H.264 NAL units read from src into separate writes
// to dst, with successive writes being performed not earlier than the specified
// delay. NAL units are split after type 1 (Coded slice of a non-IDR picture),
// 5 (Coded slice of an IDR picture) and 8 (Picture parameter set).
func LexFromBytestream(dst io.Writer, src io.Reader, delay time.Duration) error {
	var tick <-chan time.Time
	if delay == 0 {
		tick = noDelay
	} else {
		ticker := time.NewTicker(delay)
		defer ticker.Stop()
		tick = ticker.C
	}

	const bufSize = 8 << 10

	c := codecutil.NewByteScanner(src, make([]byte, 4<<10)) // Standard file buffer size.
	buf := make([]byte, len(h264Prefix), bufSize)
	copy(buf, h264Prefix[:])
	writeOut := false
outer:
	for {
		var b byte
		var err error
		buf, b, err = c.ScanUntil(buf, 0x00)
		if err != nil {
			if err != io.EOF {
				return err
			}
			break
		}

		for n := 1; b == 0x0 && n < 4; n++ {
			b, err = c.ReadByte()
			if err != nil {
				if err != io.EOF {
					return err
				}
				break outer
			}
			buf = append(buf, b)

			if b != 0x1 || (n != 2 && n != 3) {
				continue
			}

			if writeOut {
				<-tick
				_, err := dst.Write(buf[:len(buf)-(n+1)])
				if err != nil {
					return err
				}
				buf = make([]byte, len(h264Prefix)+n, bufSize)
				copy(buf, h264Prefix[:])
				buf = append(buf, 1)
				writeOut = false
			}

			b, err = c.ReadByte()
			if err != nil {
				if err != io.EOF {
					return err
				}
				break outer
			}
			buf = append(buf, b)

			// http://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-H.264-200305-S!!PDF-E&type=items
			// Table 7-1 NAL unit type codes
			const (
				nonIdrPic   = 1
				idrPic      = 5
				suppEnhInfo = 6
				paramSet    = 8
			)
			switch nalTyp := b & 0x1f; nalTyp {
			case nonIdrPic, idrPic, paramSet, suppEnhInfo:
				writeOut = true
			}
		}
	}

	if len(buf) == len(h264Prefix) {
		return nil
	}
	<-tick
	_, err := dst.Write(buf)
	return err
}

// Buffer sizes.
const (
	maxAUSize  = 100000 // Maximum access unit size in bytes.
	maxRTPSize = 1500   // Maximum Ethernet transmission unit in bytes.
)

// RTPLexer is a lexer for lexing H264 from RTP packets.
type RTPLexer struct {
	buf  *bytes.Buffer // Holds the current access unit.
	frag bool          // Indicates if we're currently dealing with a fragmentation packet.
}
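// The sketch below is illustrative only and not part of the original API. It
// shows one way LexFromBytestream might be driven: an in-memory Annex B H.264
// byte stream (assumed to have been obtained elsewhere) is fed through the
// lexer with no pacing delay, so each access unit is written to dst as soon
// as it is found.
func exampleLexFromBytestream(raw []byte, dst io.Writer) error {
	// A zero delay selects the pre-closed noDelay channel, so writes are not paced.
	return LexFromBytestream(dst, bytes.NewReader(raw), 0)
}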
// NewRTPLexer returns a new RTPLexer.
func NewRTPLexer() *RTPLexer {
	return &RTPLexer{buf: bytes.NewBuffer(make([]byte, 0, maxAUSize))}
}

// Lex extracts H264 access units from an RTP stream. This function
// expects that each read from src will provide a single RTP packet.
func (l *RTPLexer) Lex(dst io.Writer, src io.Reader, delay time.Duration) error {
	buf := make([]byte, maxRTPSize)
	for {
		n, err := src.Read(buf)
		switch err {
		case nil: // Do nothing.
		case io.EOF:
			return nil
		default:
			return fmt.Errorf("source read error: %v\n", err)
		}

		// Get payload from RTP packet.
		payload, err := rtp.Payload(buf[:n])
		if err != nil {
			return fmt.Errorf("could not get RTP payload, failed with err: %v\n", err)
		}
		nalType := payload[0] & 0x1f

		// If we're in the middle of a fragmented NAL unit and this packet is not
		// a continuation fragment, the fragmentation has been interrupted;
		// discard the partial access unit and move on.
		if l.frag && nalType != typeFUA {
			l.buf.Reset()
			l.frag = false
			continue
		}

		if nalType >= typeSingleNALULowBound && nalType <= typeSingleNALUHighBound {
			l.writeWithPrefix(payload)
		} else {
			switch nalType {
			case typeSTAPA:
				l.handleSTAPA(payload)
			case typeFUA:
				l.handleFUA(payload)
			case typeSTAPB:
				panic("STAP-B type unsupported")
			case typeMTAP16:
				panic("MTAP16 type unsupported")
			case typeMTAP24:
				panic("MTAP24 type unsupported")
			case typeFUB:
				panic("FU-B type unsupported")
			default:
				panic("unsupported type")
			}
		}

		markerIsSet, err := rtp.Marker(buf[:n])
		if err != nil {
			return fmt.Errorf("could not get marker bit, failed with err: %v\n", err)
		}

		if markerIsSet {
			l.buf.WriteTo(dst)
			l.buf.Reset()
		}
	}
	return nil
}

// handleSTAPA parses NAL units from an aggregation packet and writes
// them to the Lexer's buffer buf.
func (l *RTPLexer) handleSTAPA(d []byte) {
	for i := 1; i < len(d); {
		size := int(binary.BigEndian.Uint16(d[i:]))

		// Skip over the NAL unit size field.
		const sizeOfFieldLen = 2
		i += sizeOfFieldLen

		// Get the NALU.
		nalu := d[i : i+size]
		i += size
		l.writeWithPrefix(nalu)
	}
}

// handleFUA parses NAL units from fragmentation packets and writes
// them to the Lexer's buf.
func (l *RTPLexer) handleFUA(d []byte) {
	// Get start and end indicators from the FU header.
	const FUHeadIdx = 1
	start := d[FUHeadIdx]&0x80 != 0
	end := d[FUHeadIdx]&0x40 != 0

	// If start, form a new NAL header, skip the FU indicator only and set the
	// first byte to the new header. Otherwise, skip over both the FU indicator
	// and the FU header.
	if start {
		const FUIndicatorIdx = 0
		newHead := (d[FUIndicatorIdx] & 0xe0) | (d[FUHeadIdx] & 0x1f)
		d = d[1:]
		d[0] = newHead
	} else {
		d = d[2:]
	}

	if start {
		if end {
			panic("bad fragmentation packet")
		}
		l.frag = true
		l.writeWithPrefix(d)
	} else {
		if end {
			l.frag = false
		}
		l.writeNoPrefix(d)
	}
}

// writeWithPrefix writes a NAL unit to the Lexer's buf in byte stream format
// using the start code.
func (l *RTPLexer) writeWithPrefix(d []byte) {
	const prefix = "\x00\x00\x00\x01"
	l.buf.Write([]byte(prefix))
	l.buf.Write(d)
}

// writeNoPrefix writes data to the Lexer's buf. This is used for non-start
// fragments of a NALU.
func (l *RTPLexer) writeNoPrefix(d []byte) {
	l.buf.Write(d)
}
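// The sketch below is illustrative only and not part of the original API. It
// shows how the RTPLexer might be used: packetSrc is assumed to deliver exactly
// one RTP packet per Read call, as Lex requires, and the resulting H.264 access
// units are written to dst whenever a packet with the RTP marker bit set is seen.
func exampleLexRTP(dst io.Writer, packetSrc io.Reader) error {
	// The delay argument is passed as zero; access units are forwarded as soon
	// as they are complete.
	return NewRTPLexer().Lex(dst, packetSrc, 0)
}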