Example #1
0
/**
 * EME abstraction and event handler used to communicate with the Content
 * Decryption Module (CDM).
 *
 * The communication with backend key-servers is not handled directly by this
 * module but through the given "KeySystems".
 *
 * A system has to expose the given interface:
 * interface KeySystem {
 *   readonly attribute string type;
 *
 *   Promise<AB> getLicense((AB) challenge);
 *   AB extractInitData(AB);
 * }
 * with AB = ArrayBuffer or ArrayBufferView
 *
 * The `extractInitData` method is not mandatory and used to pre-process the
 * initData vector injected into the CDM. The `getLicense` method is used to
 * serve the license encapsulated in a promise to support asynchronous license
 * fetching. The challenge buffer sent by the CDM is directly passed as first
 * argument of this method.
 *
 * The EME handler can be given one or multiple systems and will choose the
 * appropriate one supported by the user's browser.
 */
/**
 * Wire up EME for the given media element: wait for the first "encrypted"
 * event together with a key system compatible with the current browser,
 * then create/set the MediaKeys and start session management.
 */
function createEME(video, keySystems, errorStream) {
  if (__DEV__) {
    // sanity-check the public interface of every provided key system
    for (const ks of keySystems) {
      assert.iface(ks, "keySystem", {
        getLicense: "function",
        type: "string",
      });
    }
  }

  // Handles a single "encrypted" event with the selected key system.
  const onEncryptedEvent = (encryptedEvent, { keySystem, keySystemAccess }) => {
    if (keySystem.persistentLicense) {
      $storedSessions.setStorage(keySystem.licenseStorage);
    }

    log.info("eme: encrypted event", encryptedEvent);

    const initDataType = encryptedEvent.initDataType;
    const initData = new Uint8Array(encryptedEvent.initData);

    return createAndSetMediaKeys(video, keySystem, keySystemAccess)
      .mergeMap((mediaKeys) => manageSessionCreation(
        mediaKeys,
        keySystemAccess.getConfiguration(),
        keySystem,
        initDataType,
        initData,
        errorStream
      ));
  };

  // only the very first (event, keySystem) pair triggers the setup
  const firstEncrypted = combineLatestStatic(
    onEncrypted(video),
    findCompatibleKeySystem(keySystems)
  ).take(1);

  return firstEncrypted
    .mergeMap(([evt, ks]) => onEncryptedEvent(evt, ks));
}
Example #2
0
  /**
   * Load a new content described by `options` and start the playback
   * pipeline.
   *
   * Stops any current playback, builds the transport pipelines and the
   * Stream observable, wires the player state machinery, and subscribes
   * everything through a single Subscription stored on `this`.
   *
   * Returns the observable of "loaded" stream events (emits at most once).
   */
  loadVideo(options = {}) {
    options = this._parseOptions(options);
    log.info("loadvideo", options);

    const {
      url,
      keySystems,
      subtitles,
      timeFragment,
      autoPlay,
      transport,
    } = options;

    // tear down any previously loaded content before starting the new one
    this.stop();
    this.frag = timeFragment;
    this.playing.next(autoPlay);

    // init segments are cached so they are not re-downloaded on every
    // representation switch
    const pipelines = this.createPipelines(transport, {
      audio: { cache: new InitializationSegmentCache() },
      video: { cache: new InitializationSegmentCache() },
    });

    const { adaptive, timings, video: videoElement } = this;
    // publish() makes the stream connectable: nothing runs until
    // connect() is called below, after all subscribers are attached
    const stream = Stream({
      url,
      keySystems,
      subtitles,
      timings,
      timeFragment,
      adaptive,
      pipelines,
      videoElement,
      autoPlay,
    })
      .publish();

    const stalled = filterStreamByType(stream, "stalled").startWith(null);
    const loaded = filterStreamByType(stream, "loaded").take(1).share();

    // LOADING until the first "loaded" event, then LOADED followed by the
    // states derived from the playing/stalled combination
    const stateChanges = loaded.mapTo(PLAYER_LOADED)
      .concat(combineLatestStatic(this.playing, stalled, calcPlayerState))
      .distinctUntilChanged()
      .startWith(PLAYER_LOADING);

    const playChanges = on(videoElement, ["play", "pause"]);

    const subs = this.subscriptions = new Subscription();
    subs.add(playChanges.subscribe(this._playPauseNext.bind(this), noop));
    subs.add(stateChanges.subscribe(this._setPlayerState.bind(this), noop));
    subs.add(timings.subscribe(this._triggerTimeChange.bind(this), noop));
    subs.add(stream.subscribe(
      this._streamNext.bind(this),
      this._streamError.bind(this),
      this._streamComplete.bind(this)
    ));
    // start the underlying stream now that every subscriber is in place
    subs.add(stream.connect());

    // _unsubscribe may have been called synchronously on early disposable
    if (!this.subscriptions) {
      subs.unsubscribe();
    } else {
      this._triggerTimeChange();
    }

    return loaded;
  }
Example #3
0
/**
 * Buffer management for a single adaptation (audio, video or text).
 *
 * Orchestrates the segment pipeline and the SourceBuffer: computes which
 * segments are needed, feeds them through the pipeline, appends the results
 * to the SourceBuffer, and keeps an internal, bitrate-annotated view of the
 * buffered ranges in sync with the native one.
 */
function Buffer({
  bufferType,   // Buffer type (audio, video, text)
  sourceBuffer, // SourceBuffer object
  adaptation,   // Adaptation buffered
  pipeline,     // Segment pipeline
  adapters,     // { representations, bufferSizes } observables
  timings,      // Timings observable
  seekings,     // Seekings observable
}) {

  // audio/video buffers mirror a native SourceBuffer's buffered ranges;
  // other types (text) do not need that synchronization
  const isAVBuffer = (
    bufferType == "audio" ||
    bufferType == "video"
  );

  // out-of-band channel used to re-emit OutOfIndexError notifications
  // into the main buffer observable
  const outOfIndexStream = new Subject();

  // safety level (low and high water mark) size of buffer that won't
  // be flushed when switching representation for smooth transitions
  // and avoiding buffer underflows
  const LOW_WATER_MARK_PAD  = bufferType == "video" ? 4 : 1;
  const HIGH_WATER_MARK_PAD = bufferType == "video" ? 6 : 1;

  const { representations, bufferSizes } = adapters;
  // bitrate-annotated representation of the buffered time ranges
  const ranges = new BufferedRanges();

  // emits on each SourceBuffer "update"; "error" events are converted
  // into observable errors
  const updateEnd = mergeStatic(
    on(sourceBuffer, "update"),
    on(sourceBuffer, "error").map((evt) => {
      if (evt.target && evt.target.error) {
        throw evt.target.error;
      } else {
        const errMessage = "buffer: error event";
        log.error(errMessage, evt);
        throw new Error(errMessage);
      }
    })
  ).share();

  // prevents unceasing add/remove event listeners by sharing an
  // open updateEnd stream (hackish)
  const mutedUpdateEnd = updateEnd
    .ignoreElements()
    .startWith(true);

  /**
   * Feed `data` to the underlying SourceBuffer and return an observable
   * emitting once the buffer signals that the append has completed.
   */
  function appendBuffer(data) {
    sourceBuffer.appendBuffer(data);
    return first(updateEnd);
  }

  /**
   * Remove the given time interval from the SourceBuffer and return an
   * observable emitting once the removal has completed.
   */
  function removeBuffer(interval) {
    sourceBuffer.remove(interval.start, interval.end);
    return first(updateEnd);
  }

  /**
   * Wrap a buffer operation so that it never runs while the SourceBuffer
   * is updating: if an update is in progress when the returned function
   * is subscribed, the call is postponed until the next updateEnd.
   */
  function lockedBufferFunction(operation) {
    return (payload) => defer(() => {
      if (!sourceBuffer.updating) {
        return operation(payload);
      }
      return first(updateEnd).flatMap(() => operation(payload));
    });
  }

  const lockedAppendBuffer = lockedBufferFunction(appendBuffer);
  const lockedRemoveBuffer = lockedBufferFunction(removeBuffer);

  // Buffer garbage collector algorithm. Tries to free up some part of
  // the ranges that are distant from the current playing time.
  // See: https://w3c.github.io/media-source/#sourcebuffer-prepare-append
  //
  // Returns the list of { start, end } intervals that may be removed from
  // the buffer while keeping at least `gcGap` of media around `ts`.
  function selectGCedRanges({ts, buffered}, gcGap) {
    const innerRange  = buffered.getRange(ts);
    const outerRanges = buffered.getOuterRanges(ts);

    const cleanedupRanges = [];

    // start by trying to remove all ranges that do not contain the
    // current time and are farther from it than the gcGap.
    // NOTE: the previous comparisons were inverted (`<`/`>`), which
    // selected *every* outer range regardless of gcGap and made the
    // calm/beefy two-pass collection pointless for outer ranges.
    for (let i = 0; i < outerRanges.length; i++) {
      const outerRange = outerRanges[i];
      if (ts - gcGap > outerRange.end) {
        // range ends more than gcGap before the current time
        cleanedupRanges.push(outerRange);
      }
      else if (ts + gcGap < outerRange.start) {
        // range starts more than gcGap after the current time
        cleanedupRanges.push(outerRange);
      }
    }

    // try to clean up some space in the current range, preserving the
    // [ts - gcGap, ts + gcGap] window around the playing position
    if (innerRange) {
      log.debug("buffer: gc removing part of inner range", cleanedupRanges);
      if (ts - gcGap > innerRange.start) {
        cleanedupRanges.push({
          start: innerRange.start,
          end: ts - gcGap,
        });
      }

      if (ts + gcGap < innerRange.end) {
        cleanedupRanges.push({
          start: ts + gcGap,
          end: innerRange.end,
        });
      }
    }

    return cleanedupRanges;
  }

  /**
   * Run the buffer garbage collector: select ranges far from the current
   * playing position (calm pass first, aggressive pass if the calm pass
   * found nothing) and remove them from the SourceBuffer one by one.
   */
  function bufferGarbageCollector() {
    log.warn("buffer: running garbage collector");
    return timings.take(1).flatMap((timing) => {
      const calmRanges = selectGCedRanges(timing, GC_GAP_CALM);

      // more aggressive GC if we could not find any range to clean
      const rangesToClean = calmRanges.length > 0
        ? calmRanges
        : selectGCedRanges(timing, GC_GAP_BEEFY);

      log.debug("buffer: gc cleaning", rangesToClean);
      const removals = rangesToClean.map(lockedRemoveBuffer);
      return from(removals).concatAll();
    });
  }

  /**
   * Append parsed segment data to the SourceBuffer. On QuotaExceededError
   * the garbage collector is run once and the append retried; any other
   * error is propagated unchanged.
   */
  function doAppendBufferOrGC(pipelineData) {
    const { blob } = pipelineData.parsed;
    return lockedAppendBuffer(blob).catch((err) => {
      if (err.name !== "QuotaExceededError") {
        throw err;
      }

      // launch our garbage collector and retry the append afterwards
      return bufferGarbageCollector()
        .flatMap(() => lockedAppendBuffer(blob));
    });
  }

  /**
   * Compute the list of segments to request for the given representation.
   *
   * Returns a (possibly empty) array: optionally the init segment first,
   * followed by the media segments needed to fill the wanted buffer size
   * ahead of the current position.
   */
  function getSegmentsListToInject(segmentIndex,
                                   adaptation,
                                   representation,
                                   buffered,
                                   timing,
                                   bufferSize,
                                   withInitSegment) {
    const segments = [];

    if (withInitSegment) {
      log.debug("add init segment", bufferType);
      segments.push(segmentIndex.getInitSegment());
    }

    // readyState 0: nothing but the init segment can be requested yet
    // (assumes timing.readyState mirrors the media element's readyState
    // — TODO confirm)
    if (timing.readyState === 0) {
      return segments;
    }

    const timestamp = timing.ts;

    // wanted buffer size calculates the actual size of the buffer
    // we want to ensure, taking into account the duration and the
    // potential live gap.
    const endDiff = (timing.duration || Infinity) - timestamp;
    const wantedBufferSize = Math.max(0,
      Math.min(bufferSize, timing.liveGap, endDiff));

    // the ts padding is the actual time gap that we want to apply
    // to our current timestamp in order to calculate the list of
    // segments to inject.
    let timestampPadding;
    const bufferGap = buffered.getGap(timestamp);
    if (bufferGap > LOW_WATER_MARK_PAD && bufferGap < Infinity) {
      timestampPadding = Math.min(bufferGap, HIGH_WATER_MARK_PAD);
    } else {
      timestampPadding = 0;
    }

    // in case the current buffered range has the same bitrate as the
    // requested representation, we can optimistically skip the already
    // buffered data by extending the padding up to the end of the
    // current range.
    const currentRange = ranges.getRange(timestamp);
    if (currentRange && currentRange.bitrate === representation.bitrate) {
      const rangeEndGap = Math.floor(currentRange.end - timestamp);
      if (rangeEndGap > timestampPadding) {
        timestampPadding = rangeEndGap;
      }
    }

    // given the current timestamp and the previously calculated
    // time gap and wanted buffer size, we can retrieve the list of
    // segments to inject in our pipelines.
    const mediaSegments = segmentIndex.getSegments(timestamp,
                                                 timestampPadding,
                                                 wantedBufferSize);

    return segments.concat(mediaSegments);
  }

  /**
   * Create the buffering observable for one representation.
   *
   * Emits a starting "buffer" event, then a "pipeline" event per appended
   * segment, plus "out-of-index" / "precondition-failed" events re-injected
   * from the error paths. Restarts itself after a 2s pause on HTTP 412.
   */
  function createRepresentationBuffer(representation) {
    const segmentIndex = new IndexHandler(adaptation, representation);
    // ids of segments currently queued in the pipeline
    const queuedSegments = new SimpleSet();

    // Returns false for segments that are already queued or already
    // buffered at a sufficient bitrate.
    function filterAlreadyLoaded(segment) {
      // if this segment is already in the pipeline
      const isInQueue = queuedSegments.test(segment.getId());
      if (isInQueue) {
        return false;
      }

      // segment without time info are usually init segments or some
      // kind of metadata segment that we never filter out
      if (segment.isInitSegment() || segment.getTime() < 0) {
        return true;
      }

      const time     = segmentIndex.scale(segment.getTime());
      const duration = segmentIndex.scale(segment.getDuration());

      // re-download only when the buffered version's bitrate is
      // significantly lower than the requested representation's
      const range = ranges.hasRange(time, duration);
      if (range) {
        return range.bitrate * BITRATE_REBUFFERING_RATIO < segment.getRepresentation().bitrate;
      } else {
        return true;
      }
    }

    // Compute and queue the next segments to download for the current
    // (timing, bufferSize) pair; injectCount === 0 marks the very first
    // call, which also injects the init segment.
    function doInjectSegments([timing, bufferSize], injectCount) {
      const nativeBufferedRanges = new BufferedRanges(sourceBuffer.buffered);

      // makes sure our own buffered ranges representation stay in
      // sync with the native one
      if (isAVBuffer) {
        if (!ranges.equals(nativeBufferedRanges)) {
          log.debug("intersect new buffer", bufferType);
          ranges.intersect(nativeBufferedRanges);
        }
      }

      let injectedSegments;
      try {
        // filter out already loaded and already queued segments
        const withInitSegment = (injectCount === 0);
        injectedSegments = getSegmentsListToInject(segmentIndex,
                                                   adaptation,
                                                   representation,
                                                   nativeBufferedRanges,
                                                   timing,
                                                   bufferSize,
                                                   withInitSegment);

        injectedSegments = injectedSegments.filter(filterAlreadyLoaded);
      }
      catch(err) {
        // catch OutOfIndexError errors thrown by when we try to
        // access to non available segments. Reinject this error
        // into the main buffer observable so that it can be treated
        // upstream
        if (err instanceof OutOfIndexError) {
          outOfIndexStream.next({ type: "out-of-index", value: err });
          return empty();
        }
        else {
          throw err;
        }

        // unreachable
        assert(false);
      }

      // queue all segments injected in the observable
      for (let i = 0; i < injectedSegments.length; i++) {
        queuedSegments.add(injectedSegments[i].getId());
      }

      return injectedSegments;
    }

    // Post-append bookkeeping: unqueue the segment, update the index
    // timescale and the bitrate-annotated ranges, and build the
    // "pipeline" event emitted downstream.
    function doUnqueueAndUpdateRanges(pipelineData) {
      const { segment, parsed } = pipelineData;
      queuedSegments.remove(segment.getId());

      // change the timescale if one has been extracted from the
      // parsed segment (SegmentBase)
      const timescale = parsed.timescale;
      if (timescale) {
        segmentIndex.setTimescale(timescale);
      }

      const { nextSegments, currentSegment } = parsed;
      // added segments are values parsed from the segment metadata
      // that should be added to the segmentIndex.
      let addedSegments;
      if (nextSegments) {
        addedSegments = segmentIndex.insertNewSegments(nextSegments,
                                                       currentSegment);
      } else {
        addedSegments = [];
      }

      // current segment timings informations are used to update
      // ranges informations
      if (currentSegment) {
        ranges.insert(representation.bitrate,
          segmentIndex.scale(currentSegment.ts),
          segmentIndex.scale(currentSegment.ts + currentSegment.d));
      }

      return {
        type: "pipeline",
        value: { bufferType, addedSegments, ...pipelineData },
      };
    }

    // main pipeline: (timings, bufferSizes) -> segments to inject ->
    // downloaded/parsed segments -> appended to the SourceBuffer.
    // concatMap's second argument is an RxJS 5 result selector: each
    // appended segment is mapped through doUnqueueAndUpdateRanges.
    const segmentsPipeline = combineLatestStatic(
      timings,
      bufferSizes,
      mutedUpdateEnd
    )
      .flatMap(doInjectSegments)
      .concatMap((segment) => pipeline({ segment }))
      .concatMap(
        doAppendBufferOrGC,
        doUnqueueAndUpdateRanges
      );

    return mergeStatic(segmentsPipeline, outOfIndexStream).catch((err) => {
      if (err.code !== 412) {
        throw err;
      }

      // 412 Precondition Failed request errors do not cause the
      // buffer to stop but are re-emitted in the stream as
      // "precondition-failed" type. They should be handled re-
      // adapting the live-gap that the player is holding.
      // The buffer then restarts itself after a 2 second pause.
      return Observable.of({ type: "precondition-failed", value: err })
        .concat(timer(2000))
        .concat(createRepresentationBuffer(representation));
    })
      .startWith({
        type: "buffer",
        value: { bufferType, adaptation, representation },
      });
  }

  // rebuild the representation buffer whenever the adaptive logic picks a
  // new representation; seekings only re-triggers the combination (the
  // selector keeps the representation value), restarting the buffer on seek
  return combineLatestStatic(representations, seekings, (rep) => rep)
    .switchMap(createRepresentationBuffer);
}
Example #4
0
  /**
   * Create the buffering observable for one representation.
   *
   * Emits a starting "buffer" event, then a "pipeline" event per appended
   * segment, plus "out-of-index" / "precondition-failed" events re-injected
   * from the error paths. Restarts itself after a 2s pause on HTTP 412.
   */
  function createRepresentationBuffer(representation) {
    const segmentIndex = new IndexHandler(adaptation, representation);
    // ids of segments currently queued in the pipeline
    const queuedSegments = new SimpleSet();

    // Returns false for segments that are already queued or already
    // buffered at a sufficient bitrate.
    function filterAlreadyLoaded(segment) {
      // if this segment is already in the pipeline
      const isInQueue = queuedSegments.test(segment.getId());
      if (isInQueue) {
        return false;
      }

      // segment without time info are usually init segments or some
      // kind of metadata segment that we never filter out
      if (segment.isInitSegment() || segment.getTime() < 0) {
        return true;
      }

      const time     = segmentIndex.scale(segment.getTime());
      const duration = segmentIndex.scale(segment.getDuration());

      // re-download only when the buffered version's bitrate is
      // significantly lower than the requested representation's
      const range = ranges.hasRange(time, duration);
      if (range) {
        return range.bitrate * BITRATE_REBUFFERING_RATIO < segment.getRepresentation().bitrate;
      } else {
        return true;
      }
    }

    // Compute and queue the next segments to download for the current
    // (timing, bufferSize) pair; injectCount === 0 marks the very first
    // call, which also injects the init segment.
    function doInjectSegments([timing, bufferSize], injectCount) {
      const nativeBufferedRanges = new BufferedRanges(sourceBuffer.buffered);

      // makes sure our own buffered ranges representation stay in
      // sync with the native one
      if (isAVBuffer) {
        if (!ranges.equals(nativeBufferedRanges)) {
          log.debug("intersect new buffer", bufferType);
          ranges.intersect(nativeBufferedRanges);
        }
      }

      let injectedSegments;
      try {
        // filter out already loaded and already queued segments
        const withInitSegment = (injectCount === 0);
        injectedSegments = getSegmentsListToInject(segmentIndex,
                                                   adaptation,
                                                   representation,
                                                   nativeBufferedRanges,
                                                   timing,
                                                   bufferSize,
                                                   withInitSegment);

        injectedSegments = injectedSegments.filter(filterAlreadyLoaded);
      }
      catch(err) {
        // catch OutOfIndexError errors thrown by when we try to
        // access to non available segments. Reinject this error
        // into the main buffer observable so that it can be treated
        // upstream
        if (err instanceof OutOfIndexError) {
          outOfIndexStream.next({ type: "out-of-index", value: err });
          return empty();
        }
        else {
          throw err;
        }

        // unreachable
        assert(false);
      }

      // queue all segments injected in the observable
      for (let i = 0; i < injectedSegments.length; i++) {
        queuedSegments.add(injectedSegments[i].getId());
      }

      return injectedSegments;
    }

    // Post-append bookkeeping: unqueue the segment, update the index
    // timescale and the bitrate-annotated ranges, and build the
    // "pipeline" event emitted downstream.
    function doUnqueueAndUpdateRanges(pipelineData) {
      const { segment, parsed } = pipelineData;
      queuedSegments.remove(segment.getId());

      // change the timescale if one has been extracted from the
      // parsed segment (SegmentBase)
      const timescale = parsed.timescale;
      if (timescale) {
        segmentIndex.setTimescale(timescale);
      }

      const { nextSegments, currentSegment } = parsed;
      // added segments are values parsed from the segment metadata
      // that should be added to the segmentIndex.
      let addedSegments;
      if (nextSegments) {
        addedSegments = segmentIndex.insertNewSegments(nextSegments,
                                                       currentSegment);
      } else {
        addedSegments = [];
      }

      // current segment timings informations are used to update
      // ranges informations
      if (currentSegment) {
        ranges.insert(representation.bitrate,
          segmentIndex.scale(currentSegment.ts),
          segmentIndex.scale(currentSegment.ts + currentSegment.d));
      }

      return {
        type: "pipeline",
        value: { bufferType, addedSegments, ...pipelineData },
      };
    }

    // main pipeline: (timings, bufferSizes) -> segments to inject ->
    // downloaded/parsed segments -> appended to the SourceBuffer.
    // concatMap's second argument is an RxJS 5 result selector: each
    // appended segment is mapped through doUnqueueAndUpdateRanges.
    const segmentsPipeline = combineLatestStatic(
      timings,
      bufferSizes,
      mutedUpdateEnd
    )
      .flatMap(doInjectSegments)
      .concatMap((segment) => pipeline({ segment }))
      .concatMap(
        doAppendBufferOrGC,
        doUnqueueAndUpdateRanges
      );

    return mergeStatic(segmentsPipeline, outOfIndexStream).catch((err) => {
      if (err.code !== 412) {
        throw err;
      }

      // 412 Precondition Failed request errors do not cause the
      // buffer to stop but are re-emitted in the stream as
      // "precondition-failed" type. They should be handled re-
      // adapting the live-gap that the player is holding.
      // The buffer then restarts itself after a 2 second pause.
      return Observable.of({ type: "precondition-failed", value: err })
        .concat(timer(2000))
        .concat(createRepresentationBuffer(representation));
    })
      .startWith({
        type: "buffer",
        value: { bufferType, adaptation, representation },
      });
  }