Example #1: two revisions of the same append(bytes, chunk) text-track buffer function, shown one after the other.
    function append(bytes, chunk) {
        let result,
            sampleList,
            i, j, k,
            samplesInfo,
            ccContent;
        let mediaInfo = chunk.mediaInfo;
        let mediaType = mediaInfo.type;
        let mimeType = mediaInfo.mimeType;
        let codecType = mediaInfo.codec || mimeType;
        if (!codecType) {
            log('No text type defined');
            return;
        }

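        // Builds a TextTrackInfo from the signalled mediaInfo (language, kind,
        // TTML detection) and registers it with the textTracks collection.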
        function createTextTrackFromMediaInfo(captionData, mediaInfo) {
            let textTrackInfo = new TextTrackInfo();
            let trackKindMap = { subtitle: 'subtitles', caption: 'captions' }; // The DASH spec omits the trailing "s" on KIND values, but HTML requires the plural form.
            const getKind = function () {
                let kind = (mediaInfo.roles.length > 0) ? trackKindMap[mediaInfo.roles[0]] : trackKindMap.caption;
                kind = (kind === trackKindMap.caption || kind === trackKindMap.subtitle) ? kind : trackKindMap.caption;
                return kind;
            };

            const checkTTML = function () {
                let ttml = false;
                if (mediaInfo.codec && mediaInfo.codec.search(Constants.STPP) >= 0) {
                    ttml = true;
                }
                if (mediaInfo.mimeType && mediaInfo.mimeType.search(Constants.TTML) >= 0) {
                    ttml = true;
                }
                return ttml;
            };

            textTrackInfo.captionData = captionData;
            textTrackInfo.lang = mediaInfo.lang;
            textTrackInfo.label = mediaInfo.id; // AdaptationSet id (an unsigned int)
            textTrackInfo.index = mediaInfo.index; // AdaptationSet index in manifest
            textTrackInfo.isTTML = checkTTML();
            textTrackInfo.defaultTrack = getIsDefault(mediaInfo);
            textTrackInfo.isFragmented = isFragmented;
            textTrackInfo.isEmbedded = !!mediaInfo.isEmbedded;
            textTrackInfo.kind = getKind();
            textTrackInfo.roles = mediaInfo.roles;
            let totalNrTracks = (mediaInfos ? mediaInfos.length : 0) + embeddedTracks.length;
            textTracks.addTextTrack(textTrackInfo, totalNrTracks);
        }

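        // Fragmented text (ISOBMFF-packaged stpp or wvtt): the init segment supplies
        // the track list and media timescale; later media segments carry the samples.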
        if (mediaType === Constants.FRAGMENTED_TEXT) {
            if (!initializationSegmentReceived) {
                initializationSegmentReceived = true;
                for (i = 0; i < mediaInfos.length; i++) {
                    createTextTrackFromMediaInfo(null, mediaInfos[i]);
                }
                timescale = fragmentedTextBoxParser.getMediaTimescaleFromMoov(bytes);
            } else {
                samplesInfo = fragmentedTextBoxParser.getSamplesInfo(bytes);
                sampleList = samplesInfo.sampleList;
                if (!firstSubtitleStart && sampleList.length > 0) {
                    firstSubtitleStart = sampleList[0].cts - chunk.start * timescale;
                }
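                // TTML (stpp) samples: subsample 0 holds the XML document; any further
                // subsamples are embedded images handed to the parser alongside it.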
                if (codecType.search(Constants.STPP) >= 0) {
                    parser = parser !== null ? parser : getParser(codecType);
                    for (i = 0; i < sampleList.length; i++) {
                        let sample = sampleList[i];
                        let sampleStart = sample.cts;
                        let sampleRelStart = sampleStart - firstSubtitleStart;
                        this.buffered.add(sampleRelStart / timescale, (sampleRelStart + sample.duration) / timescale);
                        let dataView = new DataView(bytes, sample.offset, sample.subSizes[0]);
                        ccContent = ISOBoxer.Utils.dataViewToString(dataView, Constants.UTF8);
                        let images = [];
                        let subOffset = sample.offset + sample.subSizes[0];
                        for (j = 1; j < sample.subSizes.length; j++) {
                            let inData = new Uint8Array(bytes, subOffset, sample.subSizes[j]);
                            let raw = String.fromCharCode.apply(null, inData);
                            images.push(raw);
                            subOffset += sample.subSizes[j];
                        }
                        try {
                            // Only used for Microsoft Smooth Streaming support: caption time is relative to the sample time, so we apply an offset.
                            let manifest = manifestModel.getValue();
                            let offsetTime = manifest.ttmlTimeIsRelative ? sampleStart / timescale : 0;
                            result = parser.parse(ccContent, offsetTime, sampleStart / timescale, (sampleStart + sample.duration) / timescale, images);
                            textTracks.addCaptions(currFragmentedTrackIdx, firstSubtitleStart / timescale, result);
                        } catch (e) {
                            fragmentModel.removeExecutedRequestsBeforeTime();
                            this.remove();
                            log('TTML parser error: ' + e.message);
                        }
                    }
                } else {
                    // WebVTT case
                    let captionArray = [];
                    for (i = 0; i < sampleList.length; i++) {
                        let sample = sampleList[i];
                        sample.cts -= firstSubtitleStart;
                        this.buffered.add(sample.cts / timescale, (sample.cts + sample.duration) / timescale);
                        let sampleData = bytes.slice(sample.offset, sample.offset + sample.size);
                        // There are boxes inside the sampleData, so we need an ISOBoxer to get at them.
                        let sampleBoxes = ISOBoxer.parseBuffer(sampleData);

                        for (j = 0; j < sampleBoxes.boxes.length; j++) {
                            let box1 = sampleBoxes.boxes[j];
                            log('VTT box1: ' + box1.type);
                            if (box1.type === 'vtte') {
                                continue; //Empty box
                            }
                            if (box1.type === 'vttc') {
                                log('VTT vttc boxes.length = ' + box1.boxes.length);
                                for (k = 0; k < box1.boxes.length; k++) {
                                    let box2 = box1.boxes[k];
                                    log('VTT box2: ' + box2.type);
                                    if (box2.type === 'payl') {
                                        let cue_text = box2.cue_text;
                                        log('VTT cue_text = ' + cue_text);
                                        let start_time = sample.cts / timescale;
                                        let end_time = (sample.cts + sample.duration) / timescale;
                                        captionArray.push({
                                            start: start_time,
                                            end: end_time,
                                            data: cue_text,
                                            styles: {}
                                        });
                                        log('VTT ' + start_time + '-' + end_time + ' : ' + cue_text);
                                    }
                                }
                            }
                        }
                    }
                    if (captionArray.length > 0) {
                        textTracks.addCaptions(currFragmentedTrackIdx, 0, captionArray);
                    }
                }
            }
        } else if (mediaType === Constants.TEXT) {
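            // Non-fragmented text: the whole buffer is one subtitle document, parsed
            // in a single pass and attached to a newly created text track.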
            let dataView = new DataView(bytes, 0, bytes.byteLength);
            ccContent = ISOBoxer.Utils.dataViewToString(dataView, Constants.UTF8);

            try {
                result = getParser(codecType).parse(ccContent, 0);
                createTextTrackFromMediaInfo(result, mediaInfo);
            } catch (e) {
                errHandler.timedTextError(e, 'parse', ccContent);
            }
        } else if (mediaType === Constants.VIDEO) { //embedded text
            if (chunk.segmentType === HTTPRequest.INIT_SEGMENT_TYPE) {
                if (embeddedTimescale === 0) {
                    embeddedTimescale = fragmentedTextBoxParser.getMediaTimescaleFromMoov(bytes);
                    for (i = 0; i < embeddedTracks.length; i++) {
                        createTextTrackFromMediaInfo(null, embeddedTracks[i]);
                    }
                }
            } else { // MediaSegment
                if (embeddedTimescale === 0) {
                    log('CEA-608: No timescale for embeddedTextTrack yet');
                    return;
                }
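                // Builds the newCue callback for a track index: cues are rendered as
                // HTML when a TTML rendering div is set, otherwise as plain text.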
                const makeCueAdderForIndex = function (self, trackIndex) {
                    function newCue(startTime, endTime, captionScreen) {
                        let captionsArray = null;
                        if (videoModel.getTTMLRenderingDiv()) {
                            captionsArray = embeddedTextHtmlRender.createHTMLCaptionsFromScreen(videoModel.getElement(), startTime, endTime, captionScreen);
                        } else {
                            let text = captionScreen.getDisplayText();
                            //log("CEA text: " + startTime + "-" + endTime + "  '" + text + "'");
                            captionsArray = [{
                                start: startTime,
                                end: endTime,
                                data: text,
                                styles: {}
                            }];
                        }
                        if (captionsArray) {
                            textTracks.addCaptions(trackIndex, 0, captionsArray);
                        }
                    }
                    return newCue;
                };

                samplesInfo = fragmentedTextBoxParser.getSamplesInfo(bytes);

                let sequenceNumber = samplesInfo.lastSequenceNumber;

                if (!embeddedCea608FieldParsers[0] && !embeddedCea608FieldParsers[1]) {
                    // Time to set up the CEA-608 parsing
                    let field, handler, trackIdx;
                    for (i = 0; i < embeddedTracks.length; i++) {
                        if (embeddedTracks[i].id === Constants.CC1) {
                            field = 0;
                            trackIdx = textTracks.getTrackIdxForId(Constants.CC1);
                        } else if (embeddedTracks[i].id === Constants.CC3) {
                            field = 1;
                            trackIdx = textTracks.getTrackIdxForId(Constants.CC3);
                        }
                        if (trackIdx === -1) {
                            log('CEA-608: data before track is ready.');
                            return;
                        }
                        handler = makeCueAdderForIndex(this, trackIdx);
                        embeddedCea608FieldParsers[i] = new cea608parser.Cea608Parser(i, {
                            'newCue': handler
                        }, null);
                    }
                }

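                // Handle each media segment only once (keyed by its sequence number) and
                // reset the field parsers when the sequence numbers show a discontinuity.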
                if (embeddedTimescale && embeddedSequenceNumbers.indexOf(sequenceNumber) === -1) {
                    if (embeddedLastSequenceNumber !== null && sequenceNumber !== embeddedLastSequenceNumber + samplesInfo.numSequences) {
                        for (i = 0; i < embeddedCea608FieldParsers.length; i++) {
                            if (embeddedCea608FieldParsers[i]) {
                                embeddedCea608FieldParsers[i].reset();
                            }
                        }
                    }

                    let allCcData = extractCea608Data(bytes, samplesInfo.sampleList);

                    for (let fieldNr = 0; fieldNr < embeddedCea608FieldParsers.length; fieldNr++) {
                        let ccData = allCcData.fields[fieldNr];
                        let fieldParser = embeddedCea608FieldParsers[fieldNr];
                        if (fieldParser) {
                            /*if (ccData.length > 0 ) {
                                log("CEA-608 adding Data to field " + fieldNr + " " + ccData.length + "bytes");
                            }*/
                            for (i = 0; i < ccData.length; i++) {
                                fieldParser.addData(ccData[i][0] / embeddedTimescale, ccData[i][1]);
                            }
                        }
                    }
                    embeddedLastSequenceNumber = sequenceNumber;
                    embeddedSequenceNumbers.push(sequenceNumber);
                }
            }
        }
    }
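
A minimal, self-contained sketch (hypothetical values, no dash.js dependencies) of the cue-timing arithmetic both revisions share: sample CTS values are ticks in the media timescale, rebased against the first subtitle's start and divided by the timescale to obtain seconds.

    const timescale = 90000;                  // ticks per second, as read from the moov box
    const chunkStart = 10;                    // chunk.start, in seconds
    const samples = [                         // hypothetical sample list
        { cts: 900000, duration: 180000 },
        { cts: 1080000, duration: 90000 }
    ];
    // Mirrors: firstSubtitleStart = sampleList[0].cts - chunk.start * timescale
    const firstSubtitleStart = samples[0].cts - chunkStart * timescale;
    for (const sample of samples) {
        const rel = sample.cts - firstSubtitleStart;
        // Mirrors: this.buffered.add(rel / timescale, (rel + duration) / timescale)
        console.log('cue ' + rel / timescale + 's -> ' + (rel + sample.duration) / timescale + 's');
    }
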
    function append(bytes, chunk) {
        let result,
            sampleList,
            i, j, k,
            samplesInfo,
            ccContent;
        const mediaInfo = chunk.mediaInfo;
        const mediaType = mediaInfo.type;
        const mimeType = mediaInfo.mimeType;
        const codecType = mediaInfo.codec || mimeType;
        if (!codecType) {
            logger.error('No text type defined');
            return;
        }

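        // Fragmented text again: in this revision the init segment only supplies the
        // media timescale; per-mediaInfo track creation is no longer done in this branch.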
        if (mediaType === Constants.FRAGMENTED_TEXT) {
            if (!initializationSegmentReceived) {
                initializationSegmentReceived = true;
                timescale = boxParser.getMediaTimescaleFromMoov(bytes);
            } else {
                samplesInfo = boxParser.getSamplesInfo(bytes);
                sampleList = samplesInfo.sampleList;
                if (firstFragmentedSubtitleStart === null && sampleList.length > 0) {
                    firstFragmentedSubtitleStart = sampleList[0].cts - chunk.start * timescale;
                }
                if (codecType.search(Constants.STPP) >= 0) {
                    parser = parser !== null ? parser : getParser(codecType);
                    for (i = 0; i < sampleList.length; i++) {
                        const sample = sampleList[i];
                        const sampleStart = sample.cts;
                        const sampleRelStart = sampleStart - firstFragmentedSubtitleStart;
                        this.buffered.add(sampleRelStart / timescale, (sampleRelStart + sample.duration) / timescale);
                        const dataView = new DataView(bytes, sample.offset, sample.subSizes[0]);
                        ccContent = ISOBoxer.Utils.dataViewToString(dataView, Constants.UTF8);
                        const images = [];
                        let subOffset = sample.offset + sample.subSizes[0];
                        for (j = 1; j < sample.subSizes.length; j++) {
                            const inData = new Uint8Array(bytes, subOffset, sample.subSizes[j]);
                            const raw = String.fromCharCode.apply(null, inData);
                            images.push(raw);
                            subOffset += sample.subSizes[j];
                        }
                        try {
                            // Only used for Microsoft Smooth Streaming support: caption time is relative to the sample time, so we apply an offset.
                            const manifest = manifestModel.getValue();
                            const offsetTime = manifest.ttmlTimeIsRelative ? sampleStart / timescale : 0;
                            result = parser.parse(ccContent, offsetTime, sampleStart / timescale, (sampleStart + sample.duration) / timescale, images);
                            textTracks.addCaptions(currFragmentedTrackIdx, firstFragmentedSubtitleStart / timescale, result);
                        } catch (e) {
                            fragmentedFragmentModel.removeExecutedRequestsBeforeTime();
                            this.remove();
                            logger.error('TTML parser error: ' + e.message);
                        }
                    }
                } else {
                    // WebVTT case
                    const captionArray = [];
                    for (i = 0; i < sampleList.length; i++) {
                        const sample = sampleList[i];
                        sample.cts -= firstFragmentedSubtitleStart;
                        this.buffered.add(sample.cts / timescale, (sample.cts + sample.duration) / timescale);
                        const sampleData = bytes.slice(sample.offset, sample.offset + sample.size);
                        // There are boxes inside the sampleData, so we need an ISOBoxer to get at them.
                        const sampleBoxes = ISOBoxer.parseBuffer(sampleData);

                        for (j = 0; j < sampleBoxes.boxes.length; j++) {
                            const box1 = sampleBoxes.boxes[j];
                            logger.debug('VTT box1: ' + box1.type);
                            if (box1.type === 'vtte') {
                                continue; //Empty box
                            }
                            if (box1.type === 'vttc') {
                                logger.debug('VTT vttc boxes.length = ' + box1.boxes.length);
                                for (k = 0; k < box1.boxes.length; k++) {
                                    const box2 = box1.boxes[k];
                                    logger.debug('VTT box2: ' + box2.type);
                                    if (box2.type === 'payl') {
                                        const cue_text = box2.cue_text;
                                        logger.debug('VTT cue_text = ' + cue_text);
                                        const start_time = sample.cts / timescale;
                                        const end_time = (sample.cts + sample.duration) / timescale;
                                        captionArray.push({
                                            start: start_time,
                                            end: end_time,
                                            data: cue_text,
                                            styles: {}
                                        });
                                        logger.debug('VTT ' + start_time + '-' + end_time + ' : ' + cue_text);
                                    }
                                }
                            }
                        }
                    }
                    if (captionArray.length > 0) {
                        textTracks.addCaptions(currFragmentedTrackIdx, 0, captionArray);
                    }
                }
            }
        } else if (mediaType === Constants.TEXT) {
            const dataView = new DataView(bytes, 0, bytes.byteLength);
            ccContent = ISOBoxer.Utils.dataViewToString(dataView, Constants.UTF8);

            try {
                result = getParser(codecType).parse(ccContent, 0);
                textTracks.addCaptions(textTracks.getCurrentTrackIdx(), 0, result);
            } catch (e) {
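                // Both the legacy timedTextError hook and the newer DashJSError-based
                // reporting are invoked on a parse failure in this revision.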
                errHandler.timedTextError(e, 'parse', ccContent);
                errHandler.error(new DashJSError(Errors.TIMED_TEXT_ERROR_ID_PARSE_CODE, Errors.TIMED_TEXT_ERROR_MESSAGE_PARSE + e.message, ccContent));
            }
        } else if (mediaType === Constants.VIDEO) { //embedded text
            if (chunk.segmentType === HTTPRequest.INIT_SEGMENT_TYPE) {
                if (embeddedTimescale === 0) {
                    embeddedTimescale = boxParser.getMediaTimescaleFromMoov(bytes);
                    for (i = 0; i < embeddedTracks.length; i++) {
                        createTextTrackFromMediaInfo(null, embeddedTracks[i]);
                    }
                }
            } else { // MediaSegment
                if (embeddedTimescale === 0) {
                    logger.warn('CEA-608: No timescale for embeddedTextTrack yet');
                    return;
                }
                const makeCueAdderForIndex = function (self, trackIndex) {
                    function newCue(startTime, endTime, captionScreen) {
                        let captionsArray = null;
                        if (videoModel.getTTMLRenderingDiv()) {
                            captionsArray = embeddedTextHtmlRender.createHTMLCaptionsFromScreen(videoModel.getElement(), startTime, endTime, captionScreen);
                        } else {
                            const text = captionScreen.getDisplayText();
                            captionsArray = [{
                                start: startTime,
                                end: endTime,
                                data: text,
                                styles: {}
                            }];
                        }
                        if (captionsArray) {
                            textTracks.addCaptions(trackIndex, 0, captionsArray);
                        }
                    }
                    return newCue;
                };

                samplesInfo = boxParser.getSamplesInfo(bytes);

                const sequenceNumber = samplesInfo.lastSequenceNumber;

                if (!embeddedCea608FieldParsers[0] && !embeddedCea608FieldParsers[1]) {
                    // Time to set up the CEA-608 parsing
                    let field, handler, trackIdx;
                    for (i = 0; i < embeddedTracks.length; i++) {
                        if (embeddedTracks[i].id === Constants.CC1) {
                            field = 0;
                            trackIdx = textTracks.getTrackIdxForId(Constants.CC1);
                        } else if (embeddedTracks[i].id === Constants.CC3) {
                            field = 1;
                            trackIdx = textTracks.getTrackIdxForId(Constants.CC3);
                        }
                        if (trackIdx === -1) {
                            logger.warn('CEA-608: data before track is ready.');
                            return;
                        }
                        handler = makeCueAdderForIndex(this, trackIdx);
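                        // Cea608Parser takes a 1-based field number in this revision, hence i + 1.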
                        embeddedCea608FieldParsers[i] = new cea608parser.Cea608Parser(i + 1, {
                            'newCue': handler
                        }, null);
                    }
                }

                if (embeddedTimescale && embeddedSequenceNumbers.indexOf(sequenceNumber) === -1) {
                    if (embeddedLastSequenceNumber !== null && sequenceNumber !== embeddedLastSequenceNumber + samplesInfo.numSequences) {
                        for (i = 0; i < embeddedCea608FieldParsers.length; i++) {
                            if (embeddedCea608FieldParsers[i]) {
                                embeddedCea608FieldParsers[i].reset();
                            }
                        }
                    }

                    const allCcData = extractCea608Data(bytes, samplesInfo.sampleList);

                    for (let fieldNr = 0; fieldNr < embeddedCea608FieldParsers.length; fieldNr++) {
                        const ccData = allCcData.fields[fieldNr];
                        const fieldParser = embeddedCea608FieldParsers[fieldNr];
                        if (fieldParser) {
                            for (i = 0; i < ccData.length; i++) {
                                fieldParser.addData(ccData[i][0] / embeddedTimescale, ccData[i][1]);
                            }
                        }
                    }
                    embeddedLastSequenceNumber = sequenceNumber;
                    embeddedSequenceNumbers.push(sequenceNumber);
                }
            }
        }
    }