/*
NAME
  lex.go

DESCRIPTION
  lex.go provides a lexer for taking RTP HEVC (H265) and lexing into access units.

AUTHORS
  Saxon A. Nelson-Milton <saxon@ausocean.org>

LICENSE
  Copyright (C) 2019 the Australian Ocean Lab (AusOcean).

  It is free software: you can redistribute it and/or modify them
  under the terms of the GNU General Public License as published by the
  Free Software Foundation, either version 3 of the License, or (at your
  option) any later version.

  It is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
  for more details.

  You should have received a copy of the GNU General Public License
  in gpl.txt. If not, see http://www.gnu.org/licenses.
*/
|
|
|
|
|
2019-05-09 05:29:20 +03:00
|
|
|
// Package h265 provides an RTP h265 lexer that can extract h265 access units
|
|
|
|
// from an RTP stream.
|
2019-04-30 10:08:23 +03:00
|
|
|
package h265
|
|
|
|
|
|
|
|
import (
|
2019-05-01 18:18:57 +03:00
|
|
|
"bytes"
|
2019-04-30 10:08:23 +03:00
|
|
|
"encoding/binary"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"bitbucket.org/ausocean/av/protocol/rtp"
|
|
|
|
)
|
|
|
|
|
|
|
|
// NALU types.
|
|
|
|
const (
|
|
|
|
typeAggregation = 48
|
|
|
|
typeFragmentation = 49
|
|
|
|
typePACI = 50
|
|
|
|
)
|
|
|
|
|
|
|
|
// Buffer sizes.
|
|
|
|
const (
|
|
|
|
maxAUSize = 100000
|
|
|
|
maxRTPSize = 4096
|
|
|
|
)
|
|
|
|
|
|
|
|
// Lexer is an H265 lexer.
|
|
|
|
type Lexer struct {
|
2019-05-01 18:18:57 +03:00
|
|
|
donl bool // Indicates whether DONL and DOND will be used for the RTP stream.
|
|
|
|
buf *bytes.Buffer // Holds the current access unit.
|
|
|
|
frag bool // Indicates if we're currently dealing with a fragmentation packet.
|
|
|
|
}
|
|
|
|
|
2019-05-09 05:30:17 +03:00
|
|
|
// NewLexer returns a new Lexer.
|
2019-05-01 18:18:57 +03:00
|
|
|
func NewLexer(donl bool) *Lexer {
|
|
|
|
return &Lexer{
|
|
|
|
donl: donl,
|
2019-05-19 14:44:41 +03:00
|
|
|
buf: bytes.NewBuffer(make([]byte, 0, maxAUSize)),
|
|
|
|
}
|
2019-04-30 10:08:23 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Lex continually reads RTP packets from the io.Reader src and lexes into
|
|
|
|
// access units which are written to the io.Writer dst. Lex expects that for
|
|
|
|
// each read from src, a single RTP packet is received.
|
|
|
|
func (l *Lexer) Lex(dst io.Writer, src io.Reader, delay time.Duration) error {
|
|
|
|
buf := make([]byte, maxRTPSize)
|
|
|
|
for {
|
|
|
|
n, err := src.Read(buf)
|
|
|
|
switch err {
|
2019-05-09 05:31:05 +03:00
|
|
|
case nil: // Do nothing.
|
2019-04-30 10:08:23 +03:00
|
|
|
case io.EOF:
|
2019-05-01 10:15:22 +03:00
|
|
|
return nil
|
2019-04-30 10:08:23 +03:00
|
|
|
default:
|
|
|
|
return fmt.Errorf("source read error: %v\n", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get payload from RTP packet.
|
|
|
|
payload, err := rtp.Payload(buf[:n])
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("could not get rtp payload, failed with err: %v\n", err)
|
|
|
|
}
|
2019-05-01 10:15:22 +03:00
|
|
|
nalType := (payload[0] >> 1) & 0x3f
|
2019-04-30 10:08:23 +03:00
|
|
|
|
2019-05-01 18:18:57 +03:00
|
|
|
// If not currently fragmented then we ignore current write.
|
|
|
|
if l.frag && nalType != typeFragmentation {
|
|
|
|
l.buf.Reset()
|
|
|
|
l.frag = false
|
2019-04-30 10:08:23 +03:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
switch nalType {
|
|
|
|
case typeAggregation:
|
|
|
|
l.handleAggregation(payload)
|
|
|
|
case typeFragmentation:
|
|
|
|
l.handleFragmentation(payload)
|
|
|
|
case typePACI:
|
|
|
|
l.handlePACI(payload)
|
|
|
|
default:
|
2019-05-01 18:18:57 +03:00
|
|
|
l.writeWithPrefix(payload)
|
2019-04-30 10:08:23 +03:00
|
|
|
}
|
|
|
|
|
2019-05-09 07:45:11 +03:00
|
|
|
markerIsSet, err := rtp.Marker(buf[:n])
|
2019-04-30 10:08:23 +03:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("could not get marker bit, failed with err: %v\n", err)
|
|
|
|
}
|
|
|
|
|
2019-05-09 07:45:11 +03:00
|
|
|
if markerIsSet {
|
2019-05-01 18:18:57 +03:00
|
|
|
_, err := l.buf.WriteTo(dst)
|
2019-04-30 10:08:23 +03:00
|
|
|
if err != nil {
|
|
|
|
// TODO: work out what to do here.
|
|
|
|
}
|
2019-05-01 18:18:57 +03:00
|
|
|
l.buf.Reset()
|
2019-04-30 10:08:23 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-05-09 05:33:06 +03:00
|
|
|
// handleAggregation parses NAL units from an aggregation packet and writes
|
2019-04-30 10:08:23 +03:00
|
|
|
// them to the Lexers buffer buf.
|
|
|
|
func (l *Lexer) handleAggregation(d []byte) {
|
|
|
|
idx := 2
|
|
|
|
for idx < len(d) {
|
2019-05-01 18:18:57 +03:00
|
|
|
if l.donl {
|
2019-04-30 10:08:23 +03:00
|
|
|
switch idx {
|
|
|
|
case 2:
|
|
|
|
idx += 2
|
|
|
|
default:
|
2019-05-09 05:33:40 +03:00
|
|
|
idx++
|
2019-04-30 10:08:23 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
size := int(binary.BigEndian.Uint16(d[idx:]))
|
|
|
|
idx += 2
|
|
|
|
nalu := d[idx : idx+size]
|
|
|
|
idx += size
|
2019-05-01 18:18:57 +03:00
|
|
|
l.writeWithPrefix(nalu)
|
2019-04-30 10:08:23 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// handleFragmentation parses NAL units from fragmentation packets and writes
|
|
|
|
// them to the Lexer's buf.
|
|
|
|
func (l *Lexer) handleFragmentation(d []byte) {
|
2019-05-01 18:18:57 +03:00
|
|
|
// Get start and end indiciators from FU header.
|
|
|
|
start := d[2]&0x80 != 0
|
|
|
|
end := d[2]&0x40 != 0
|
|
|
|
|
2019-05-20 14:19:50 +03:00
|
|
|
b1 := (d[0] & 0x81) | ((d[2] & 0x3f) << 1)
|
|
|
|
b2 := d[1]
|
2019-05-19 10:51:41 +03:00
|
|
|
if start {
|
2019-05-19 14:44:41 +03:00
|
|
|
d = d[1:]
|
|
|
|
if l.donl {
|
|
|
|
d = d[2:]
|
|
|
|
}
|
2019-05-20 14:19:50 +03:00
|
|
|
d[0] = b1
|
|
|
|
d[1] = b2
|
2019-05-19 14:44:41 +03:00
|
|
|
} else {
|
|
|
|
d = d[3:]
|
|
|
|
if l.donl {
|
|
|
|
d = d[2:]
|
|
|
|
}
|
2019-05-09 05:37:46 +03:00
|
|
|
}
|
|
|
|
|
2019-04-30 11:38:41 +03:00
|
|
|
switch {
|
2019-05-01 18:18:57 +03:00
|
|
|
case start && !end:
|
|
|
|
l.frag = true
|
2019-05-19 14:44:41 +03:00
|
|
|
l.writeWithPrefix(d)
|
2019-05-01 18:18:57 +03:00
|
|
|
case !start && end:
|
|
|
|
l.frag = false
|
2019-04-30 11:38:41 +03:00
|
|
|
fallthrough
|
2019-05-01 18:18:57 +03:00
|
|
|
case !start && !end:
|
2019-05-09 05:37:46 +03:00
|
|
|
l.writeNoPrefix(d)
|
2019-04-30 11:38:41 +03:00
|
|
|
default:
|
2019-05-01 16:49:16 +03:00
|
|
|
panic("bad fragmentation packet")
|
2019-04-30 11:38:41 +03:00
|
|
|
}
|
2019-04-30 10:08:23 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// handlePACI will handl PACI packets
|
|
|
|
//
|
|
|
|
// TODO: complete this
|
|
|
|
func (l *Lexer) handlePACI(d []byte) {
|
|
|
|
panic("unsupported nal type")
|
|
|
|
}
|
|
|
|
|
2019-04-30 11:38:41 +03:00
|
|
|
// write writes a NAL unit to the Lexer's buf in byte stream format using the
|
2019-04-30 10:08:23 +03:00
|
|
|
// start code.
|
2019-05-01 18:18:57 +03:00
|
|
|
func (l *Lexer) writeWithPrefix(d []byte) {
|
2019-04-30 11:38:41 +03:00
|
|
|
const prefix = "\x00\x00\x00\x01"
|
2019-05-01 18:18:57 +03:00
|
|
|
l.buf.Write([]byte(prefix))
|
|
|
|
l.buf.Write(d)
|
2019-04-30 10:08:23 +03:00
|
|
|
}
|
2019-04-30 11:38:41 +03:00
|
|
|
|
|
|
|
// writeNoPrefix writes data to the Lexer's buf. This is used for non start
|
|
|
|
// fragmentations of a NALU.
|
|
|
|
func (l *Lexer) writeNoPrefix(d []byte) {
|
2019-05-01 18:18:57 +03:00
|
|
|
l.buf.Write(d)
|
2019-04-30 11:38:41 +03:00
|
|
|
}
|