Coverage for coherence/transcoder.py : 34%

Hot-keys on this page
r m x p toggle line displays
j k next/prev highlighted chunk
0 (zero) top of page
1 (one) first highlighted chunk
# -*- coding: utf-8 -*-
# Licensed under the MIT license # http://opensource.org/licenses/mit-license.php
# Copyright 2008, Frank Scholz <coherence@beebits.net>
a Coherence MediaServer
using GStreamer pipelines for the actual work and feeding the output into an HTTP response """
""" just a class to inherit from and which we can look for upon creating our list of available transcoders """
"sinkpadtemplate", Gst.PadDirection.SINK, Gst.PadPresence.ALWAYS, Gst.Caps.new_any())
"srcpadtemplate", Gst.PadDirection.SRC, Gst.PadPresence.ALWAYS, Gst.Caps.new_any())
Gst.Element.__init__(self) log.LogAble.__init__(self) self.sinkpad = Gst.Pad.new_from_template( self._sinkpadtemplate, "sink") self.srcpad = Gst.Pad.new_from_template( self._srcpadtemplate, "src") self.add_pad(self.sinkpad) self.add_pad(self.srcpad)
self.sinkpad.set_chain_function_full(self.chainfunc)
self.buffer = '' self.buffer_size = 0 self.proxy = False self.got_new_segment = False self.closed = False
return \ struct.pack( ">L4s", 32, 'ftyp') + \ b"mp42\x00\x00\x00\x00mp42mp41isomiso2"
if self.proxy: # we are in proxy mode already self.srcpad.push(buffer) return Gst.FlowReturn.OK
self.buffer = self.buffer + buffer.data if not self.buffer_size: try: self.buffer_size, a_type = \ struct.unpack(">L4s", self.buffer[:8]) except Exception: return Gst.FlowReturn.OK
if len(self.buffer) < self.buffer_size: # we need to buffer more return Gst.FlowReturn.OK
buffer = self.buffer[self.buffer_size:] fake_header = self.get_fake_header() n_buf = Gst.Buffer(fake_header + buffer) self.proxy = True self.srcpad.push(n_buf)
return Gst.FlowReturn.OK
"sinkpadtemplate", Gst.PadDirection.SINK, Gst.PadPresence.ALWAYS, Gst.Caps.new_any())
Gst.Element.__init__(self) log.LogAble.__init__(self) self.sinkpad = Gst.Pad.new_from_template( self._sinkpadtemplate, "sink") self.add_pad(self.sinkpad)
self.sinkpad.set_chain_function_full(self.chainfunc) self.sinkpad.set_event_function_full(self.eventfunc) self.destination = destination self.request = request
if self.destination is not None: self.destination = open(self.destination, 'wb') self.buffer = '' self.data_size = 0 self.got_new_segment = False self.closed = False
size = buffer.get_size() buf_data = buffer.extract_dup(0, size) if not isinstance(buf_data, bytes): buf = buffer.encode('ascii') if self.closed: return Gst.FlowReturn.OK if self.destination is not None: self.destination.write(buf_data) elif self.request is not None: self.buffer += buf_data if len(self.buffer) > 200000: self.request.write(self.buffer) self.buffer = b'' else: self.buffer += buffer.data
self.data_size += size return Gst.FlowReturn.OK
if event.type == Gst.Event.new_stream_start('').type: if not self.got_new_segment: self.got_new_segment = True else: self.closed = True elif event.type == Gst.Event.new_eos().type: if self.destination is not None: self.destination.close() elif self.request is not None: if len(self.buffer) > 0: self.request.write(self.buffer) self.request.finish() return True
self.pipeline_description = pipeline self.contentType = content_type self.requests = [] # if stream has a streamheader (something that has to be prepended # before any data), then it will be a tuple of GstBuffers self.streamheader = None self.parse_pipeline() resource.Resource.__init__(self) log.LogAble.__init__(self)
self.pipeline = Gst.parse_launch(self.pipeline_description) self.appsink = Gst.ElementFactory.make("appsink", "sink") self.appsink.set_property('emit-signals', True) self.pipeline.add(self.appsink) enc = self.pipeline.get_by_name("enc") enc.link(self.appsink) self.appsink.connect("new-preroll", self.new_preroll) self.appsink.connect("new-buffer", self.new_buffer) self.appsink.connect("eos", self.eos)
self.info("GStreamerPipeline start %r %r", request, self.pipeline_description) self.requests.append(request) self.pipeline.set_state(Gst.State.PLAYING)
d = request.notifyFinish() d.addBoth(self.requestFinished, request)
self.debug("new preroll") buffer = appsink.emit('pull-preroll') if not self.streamheader: # check caps for streamheader buffer caps = buffer.get_caps() s = caps[0] if "streamheader" in s: self.streamheader = s["streamheader"] self.debug("setting streamheader") for r in self.requests: self.debug("writing streamheader") for h in self.streamheader: r.write(h.data) for r in self.requests: self.debug("writing preroll") r.write(buffer.data)
buffer = appsink.emit('pull-buffer') if not self.streamheader: # check caps for streamheader buffers caps = buffer.get_caps() s = caps[0] if "streamheader" in s: self.streamheader = s["streamheader"] self.debug("setting streamheader") for r in self.requests: self.debug("writing streamheader") for h in self.streamheader: r.write(h.data) for r in self.requests: r.write(buffer.data)
self.info("eos") for r in self.requests: r.finish() self.cleanup()
self.info('getChild %s, %s', name, request) return self
self.info('render GET %r', request) request.setResponseCode(200) if hasattr(self, 'contentType'): request.setHeader(b'Content-Type', self.contentType) request.write(b'')
headers = request.getAllHeaders() if ('connection' in headers and headers['connection'] == 'close'): pass if self.requests: if self.streamheader: self.debug("writing streamheader") for h in self.streamheader: request.write(h.data) self.requests.append(request) else: self.parse_pipeline() self.start(request) return server.NOT_DONE_YET
self.info('render HEAD %r', request) request.setResponseCode(200) request.setHeader(b'Content-Type', self.contentType) request.write(b'')
self.info("requestFinished %r", result) """ we need to find a way to destroy the pipeline here """ # from twisted.internet import reactor # reactor.callLater(0, self.pipeline.set_state, Gst.State.NULL) self.requests.remove(request) if not self.requests: self.cleanup()
t = message.type print("on_message", t) if t == Gst.Message.ERROR: # err, debug = message.parse_error() # print "Error: %s" % err, debug self.cleanup() elif t == Gst.Message.EOS: self.cleanup()
self.info("pipeline cleanup") self.pipeline.set_state(Gst.State.NULL) self.requests = [] self.streamheader = None
uri = 'file://' + urllib.parse.quote(uri) # FIXME
self.info('getChild %s, %s', name, request) return self
self.info('render GET %r', request) request.setResponseCode(200) if self.contentType is not None: request.setHeader(b'Content-Type', self.contentType) request.write(b'')
headers = request.getAllHeaders() if ('connection' in headers and headers['connection'] == 'close'): pass
self.start(request) return server.NOT_DONE_YET
self.info('render HEAD %r', request) request.setResponseCode(200) request.setHeader(b'Content-Type', self.contentType) request.write(b'')
self.info("requestFinished %r", result) """ we need to find a way to destroy the pipeline here """ # from twisted.internet import reactor # reactor.callLater(0, self.pipeline.set_state, Gst.State.NULL) GObject.idle_add(self.cleanup)
t = message.type print("on_message", t) if t == Gst.Message.ERROR: # err, debug = message.parse_error() # print "Error: %s" % err, debug self.cleanup() elif t == Gst.Message.EOS: self.cleanup()
self.pipeline.set_state(Gst.State.NULL)
"""This method should be sub classed for each class which inherits from BaseTranscoder""" pass
self.info("PCMTranscoder start %r %r", request, self.uri) self.pipeline = Gst.parse_launch( "%s ! decodebin ! audioconvert name=conv" % self.uri)
conv = self.pipeline.get_by_name('conv') caps = Gst.Caps.from_string( "audio/x-raw-int,rate=44100,endianness=4321," "channels=2,width=16,depth=16,signed=true") # FIXME: UGLY. 'filter' is a python builtin! filter = Gst.ElementFactory.make("capsfilter", "filter") filter.set_property("caps", caps) self.pipeline.add(filter) conv.link(filter)
sink = DataSink(destination=self.destination, request=request) self.pipeline.add(sink) filter.link(sink) self.pipeline.set_state(Gst.State.PLAYING)
d = request.notifyFinish() d.addBoth(self.requestFinished)
self.info("start %r", request) self.pipeline = Gst.parse_launch( "%s ! decodebin ! audioconvert ! wavenc name=enc" % self.uri) enc = self.pipeline.get_by_name('enc') sink = DataSink(destination=self.destination, request=request) self.pipeline.add(sink) enc.link(sink) # bus = self.pipeline.get_bus() # bus.connect('message', self.on_message) self.pipeline.set_state(Gst.State.PLAYING)
d = request.notifyFinish() d.addBoth(self.requestFinished)
self.info("start %r", request) self.pipeline = Gst.parse_launch( "%s ! decodebin ! audioconvert ! lame name=enc" % self.uri) enc = self.pipeline.get_by_name('enc') sink = DataSink(destination=self.destination, request=request) self.pipeline.add(sink) enc.link(sink) self.pipeline.set_state(Gst.State.PLAYING)
d = request.notifyFinish() d.addBoth(self.requestFinished)
""" Only works if H264 inside Quicktime/MP4 container is input Source has to be a valid uri """
self.info("start %r", request) self.pipeline = Gst.parse_launch( "%s ! qtdemux name=d ! queue ! h264parse ! " "mp4mux name=mux d. ! queue ! mux." % self.uri) mux = self.pipeline.get_by_name('mux') sink = DataSink(destination=self.destination, request=request) self.pipeline.add(sink) mux.link(sink) self.pipeline.set_state(Gst.State.PLAYING)
d = request.notifyFinish() d.addBoth(self.requestFinished)
self.info("start %r", request) # FIXME - mpeg2enc self.pipeline = Gst.parse_launch( "mpegtsmux name=mux %s ! decodebin2 name=d ! queue ! " "ffmpegcolorspace ! mpeg2enc ! queue ! mux. d. ! " "queue ! audioconvert ! twolame ! queue ! mux." % self.uri) enc = self.pipeline.get_by_name('mux') sink = DataSink(destination=self.destination, request=request) self.pipeline.add(sink) enc.link(sink) self.pipeline.set_state(Gst.State.PLAYING)
d = request.notifyFinish() d.addBoth(self.requestFinished)
""" should create a valid thumbnail according to the DLNA spec neither width nor height must exceed 160px """
self.info("start %r", request) """ # what we actually want here is a pipeline that calls # us when it knows about the size of the original image, # and allows us now to adjust the caps-filter with the # calculated values for width and height new_width = 160 new_height = 160 if original_width > 160: new_heigth = \ int(float(original_height) * (160.0/float(original_width))) if new_height > 160: new_width = \ int(float(new_width) * (160.0/float(new_height))) elif original_height > 160: new_width = \ int(float(original_width) * (160.0/float(original_height))) """ try: type = request.args['type'][0] except IndexError: type = 'jpeg' if type == 'png': self.pipeline = Gst.parse_launch( "%s ! decodebin2 ! videoscale ! " "video/x-raw-yuv,width=160,height=160 ! pngenc name=enc" % self.uri) self.contentType = 'image/png' else: self.pipeline = Gst.parse_launch( "%s ! decodebin2 ! videoscale ! " "video/x-raw-yuv,width=160,height=160 ! jpegenc name=enc" % self.uri) self.contentType = 'image/jpeg' enc = self.pipeline.get_by_name('enc') sink = DataSink(destination=self.destination, request=request) self.pipeline.add(sink) enc.link(sink) self.pipeline.set_state(Gst.State.PLAYING)
d = request.notifyFinish() d.addBoth(self.requestFinished)
""" a generic Transcoder based on GStreamer
the pipeline which will be parsed upon calling the start method has to be set as the attribute pipeline_description of the instantiated class
same for the attribute contentType """
if self.pipeline_description is None: raise NotImplementedError( "Warning: operation cancelled. You must set a value for " "GStreamerTranscoder.pipeline_description") self.info("start %r", request) self.pipeline = Gst.parse_launch(self.pipeline_description % self.uri) enc = self.pipeline.get_by_name('mux') sink = DataSink(destination=self.destination, request=request) self.pipeline.add(sink) enc.link(sink) self.pipeline.set_state(Gst.State.PLAYING)
d = request.notifyFinish() d.addBoth(self.requestFinished)
self.caller = caller
print("pp connection made")
# print "outReceived with %d bytes!" % len(data) self.caller.write_data(data)
# print "errReceived! with %d bytes!" % len(data) print("pp (err):", data.strip())
# print "inConnectionLost! stdin is closed! (we probably did it)" pass
# print "outConnectionLost! The child closed their stdout!" pass
# print "errConnectionLost! The child closed their stderr." pass
print("processEnded, status %d" % status_object.value.exitCode) print("processEnded quitting") self.caller.ended = True self.caller.write_data('')
self.pipeline = pipeline self.request = request self.process = None self.written = 0 self.data = '' self.ended = False request.registerProducer(self, 0)
if data: # print "write %d bytes of data" % len(data) self.written += len(data) # this .write will spin the reactor, calling .doWrite and then # .resumeProducing again, so be prepared for a re-entrant call self.request.write(data) if self.request and self.ended: print("closing") self.request.unregisterProducer() self.request.finish() self.request = None
# print "resumeProducing", self.request if not self.request: return if self.process is None: argv = self.pipeline.split() executable = argv[0] argv[0] = os.path.basename(argv[0]) from twisted.internet import reactor self.process = reactor.spawnProcess(ExternalProcessProtocol(self), executable, argv, {})
pass
print("stopProducing", self.request) self.request.unregisterProducer() self.process.loseConnection() self.request.finish() self.request = None
return self
print("ExternalProcessPipeline render") if self.pipeline_description is None: raise NotImplementedError( "Warning: operation cancelled. You must set a value for " "ExternalProcessPipeline.pipeline_description") if self.contentType is not None: request.setHeader(b'Content-Type', self.contentType)
ExternalProcessProducer(self.pipeline_description % self.uri, request) return server.NOT_DONE_YET
""" singleton class which holds information about all available transcoders
they are put into a transcoders dict with their id as the key
we collect all internal transcoders by searching for all subclasses of InternalTranscoder, the class will be the value
transcoders defined in the config are parsed and stored as a dict in the transcoders dict
in the config a transcoder description has to look like this:
*** preliminary, will be extended and might even change without further notice ***
<transcoder> <pipeline>%s ...</pipeline> <!-- we need a %s here to insert the source uri (or can we have all the times pipelines we can prepend with a '%s !') and an element named mux where we can attach our sink --> <type>gstreamer</type> <!-- could be gstreamer or process --> <name>mpegts</name> <target>video/mpeg</target> <fourth_field> <!-- value for the 4th field of the protocolInfo phalanx, default is '*' --> </transcoder>
"""
""" creates the singleton """ obj.coherence = kwargs['coherence']
""" initializes the class
it should be called at least once with the main coherence class passed as an argument, so we have access to the config """
# FIXME: is anyone checking if all keys are given ? " missing placehoder '%%s' in 'pipeline'", transcoder)
except UnicodeEncodeError: self.warning("Can't create transcoder %r:" " the 'name' contains non-ascii letters", transcoder) continue
transcoder['target'], transcoder['pipeline']) transcoder['target'], transcoder['pipeline']) else: self.warning("unknown transcoder type %r", transcoder_type) continue
# FIXME reduce that to info later
# FIXME:why do we specify the name when trying to get it?
""" try to find a transcoder provided by the backend and return that here, if there isn't one continue with the ones provided by the config or the internal ones """ pass
|