Index: webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py
diff --git a/webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py b/webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py
index b0efa13e2b3fb6302a2a2343e0f6531747dff728..720cb9b4cc9c0a60e43bbcfb610f817b2164c40d 100644
--- a/webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py
+++ b/webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py
@@ -6,55 +6,39 @@
 # in the file PATENTS. All contributing project authors may
 # be found in the AUTHORS file in the root of the source tree.
-import logging
+import hashlib
 import os
 import re
 class HtmlExport(object):
-  """HTML exporter class for APM quality scores.
-  """
+  """HTML exporter class for APM quality scores."""
-  # Path to CSS and JS files.
-  _PATH = os.path.dirname(os.path.realpath(__file__))
+  _NEW_LINE = '\n'
-  # CSS file parameters.
+  # CSS and JS file paths.
+  _PATH = os.path.dirname(os.path.realpath(__file__))
   _CSS_FILEPATH = os.path.join(_PATH, 'results.css')
-  _INLINE_CSS = False
-
-  # JS file parameters.
   _JS_FILEPATH = os.path.join(_PATH, 'results.js')
-  _INLINE_JS = False
-
-  _NEW_LINE = '\n'
   def __init__(self, output_filepath):
-    self._test_data_generator_names = None
-    self._test_data_generator_params = None
+    self._scores_data_frame = None
     self._output_filepath = output_filepath
-  def Export(self, scores):
-    """Exports the scores into an HTML file.
+  def Export(self, scores_data_frame):
+    """Exports scores into an HTML file.
     Args:
-      scores: nested dictionary containing the scores.
+      scores_data_frame: pandas DataFrame with one row per computed score.
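+          Expected columns include eval_score_name, apm_config,
+          test_data_gen, test_data_gen_params, capture, render,
+          echo_simulator, score and the *_filepath columns that are
+          exported as hidden inputs.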
""" |
- # Generate one table for each evaluation score. |
- tables = [] |
- for score_name in sorted(scores.keys()): |
- tables.append(self._BuildScoreTable(score_name, scores[score_name])) |
- |
- # Create the html file. |
- html = ( |
- '<html>' + |
- self._BuildHeader() + |
- '<body onload="initialize()">' + |
- '<h1>Results from {}</h1>'.format(self._output_filepath) + |
- self._NEW_LINE.join(tables) + |
- '</body>' + |
- '</html>') |
- |
- self._Save(self._output_filepath, html) |
+ self._scores_data_frame = scores_data_frame |
+ html = ['<html>', |
+ self._BuildHeader(), |
+ '<body onload="initialize()">', |
+ self._BuildBody(), |
+ '</body>', |
+ '</html>'] |
+ self._Save(self._output_filepath, self._NEW_LINE.join(html)) |
def _BuildHeader(self): |
"""Builds the <head> section of the HTML file. |
@@ -67,197 +51,289 @@ class HtmlExport(object): |
""" |
html = ['<head>', '<title>Results</title>'] |
- # Function to append the lines of a text file to html. |
+ # Add Material Design hosted libs. |
+    html.append('<link rel="stylesheet" href="https://fonts.googleapis.com/'
+                'css?family=Roboto:300,400,500,700" type="text/css">')
+    html.append('<link rel="stylesheet" href="https://fonts.googleapis.com/'
+                'icon?family=Material+Icons">')
+    html.append('<link rel="stylesheet" href="https://code.getmdl.io/1.3.0/'
+                'material.indigo-pink.min.css">')
+    html.append('<script defer src="https://code.getmdl.io/1.3.0/'
+                'material.min.js"></script>')
+
+    # Embed custom JavaScript and CSS files.
     def EmbedFile(filepath):
       with open(filepath) as f:
         for l in f:
-          html.append(l.strip())
-
-    # CSS.
-    if self._INLINE_CSS:
-      # Embed.
-      html.append('<style>')
-      EmbedFile(self._CSS_FILEPATH)
-      html.append('</style>')
-    else:
-      # Link.
-      html.append('<link rel="stylesheet" type="text/css" '
-                  'href="file://{}?">'.format(self._CSS_FILEPATH))
-
-    # Javascript.
-    if self._INLINE_JS:
-      # Embed.
-      html.append('<script>')
-      EmbedFile(self._JS_FILEPATH)
-      html.append('</script>')
-    else:
-      # Link.
-      html.append('<script src="file://{}?"></script>'.format(
-          self._JS_FILEPATH))
+          html.append(l.rstrip())
+    html.append('<script>')
+    EmbedFile(self._JS_FILEPATH)
+    html.append('</script>')
+    html.append('<style>')
+    EmbedFile(self._CSS_FILEPATH)
+    html.append('</style>')
     html.append('</head>')
     return self._NEW_LINE.join(html)
-  def _BuildScoreTable(self, score_name, scores):
-    """Builds a table for a specific evaluation score (e.g., POLQA).
+  def _BuildBody(self):
+    """Builds the content of the <body> section."""
+    score_names = self._scores_data_frame.eval_score_name.unique().tolist()
+
+    html = [
+        ('<div class="mdl-layout mdl-js-layout mdl-layout--fixed-header '
+         'mdl-layout--fixed-tabs">'),
+        '<header class="mdl-layout__header">',
+        '<div class="mdl-layout__header-row">',
+        '<span class="mdl-layout-title">APM QA results ({})</span>'.format(
+            self._output_filepath),
+        '</div>',
+    ]
+
+    # Tab selectors.
+    html.append('<div class="mdl-layout__tab-bar mdl-js-ripple-effect">')
+    for tab_index, score_name in enumerate(score_names):
+      is_active = tab_index == 0
+      html.append('<a href="#score-tab-{}" class="mdl-layout__tab{}">'
+                  '{}</a>'.format(tab_index,
+                                  ' is-active' if is_active else '',
+                                  self._FormatName(score_name)))
+    html.append('</div>')
+
+    html.append('</header>')
+    html.append('<main class="mdl-layout__content">')
+
+    # Tabs content.
+    for tab_index, score_name in enumerate(score_names):
+      is_active = tab_index == 0
+      html.append('<section class="mdl-layout__tab-panel{}" '
+                  'id="score-tab-{}">'.format(
+                      ' is-active' if is_active else '', tab_index))
+ html.append('<div class="page-content">') |
+ html.append(self._BuildScoreTab(score_name)) |
+ html.append('</div>') |
+ html.append('</section>') |
+ |
+ html.append('</main>') |
+ html.append('</div>') |
- Args: |
- score_name: name of the score. |
- scores: nested dictionary of scores. |
+ return self._NEW_LINE.join(html) |
- Returns: |
- A string with <table>...</table> HTML. |
- """ |
- config_names = sorted(scores.keys()) |
- input_names = sorted(scores[config_names[0]].keys()) |
- rows = [self._BuildTableRow( |
- score_name, config_name, scores[config_name], input_names) for ( |
- config_name) in config_names] |
+ def _BuildScoreTab(self, score_name): |
+ """Builds the content of a tab.""" |
+ # Find unique values. |
+ scores = self._scores_data_frame[ |
+ self._scores_data_frame.eval_score_name == score_name] |
+ apm_configs = sorted(self._FindUniqueTuples(scores, ['apm_config'])) |
+ test_data_gen_configs = sorted(self._FindUniqueTuples( |
+ scores, ['test_data_gen', 'test_data_gen_params'])) |
+ |
+ html = [ |
+ '<div class="mdl-grid">', |
+ '<div class="mdl-layout-spacer"></div>', |
+ '<div class="mdl-cell mdl-cell--10-col">', |
+ ('<table class="mdl-data-table mdl-js-data-table mdl-shadow--2dp" ' |
+ 'style="width: 100%;">'), |
+ ] |
+ |
+ # Header. |
+ html.append('<thead><tr><th>APM config / Test data generator</th>') |
+ for test_data_gen_info in test_data_gen_configs: |
+ html.append('<th>{} {}</th>'.format( |
+ self._FormatName(test_data_gen_info[0]), test_data_gen_info[1])) |
+ html.append('</tr></thead>') |
+ |
+ # Body. |
+ html.append('<tbody>') |
+ for apm_config in apm_configs: |
+ html.append('<tr><td>' + self._FormatName(apm_config[0]) + '</td>') |
+ for test_data_gen_info in test_data_gen_configs: |
+ onclick_handler = 'openScoreStatsInspector(\'{}\')'.format( |
+ self._ScoreStatsInspectorDialogId(score_name, apm_config[0], |
+ test_data_gen_info[0], |
+ test_data_gen_info[1])) |
+ html.append('<td onclick="{}">{}</td>'.format( |
+ onclick_handler, self._BuildScoreTableCell( |
+ score_name, test_data_gen_info[0], test_data_gen_info[1], |
+ apm_config[0]))) |
+ html.append('</tr>') |
+ html.append('</tbody>') |
+ |
+ html.append('</table></div><div class="mdl-layout-spacer"></div></div>') |
+ |
+ html.append(self._BuildScoreStatsInspectorDialogs( |
+ score_name, apm_configs, test_data_gen_configs)) |
- html = ( |
- '<table celpadding="0" cellspacing="0">' + |
- '<thead><tr>{}</tr></thead>'.format( |
- self._BuildTableHeader(score_name, input_names)) + |
- '<tbody>' + |
- '<tr>' + '</tr><tr>'.join(rows) + '</tr>' + |
- '</tbody>' + |
- '</table>' + self._BuildLegend()) |
+ return self._NEW_LINE.join(html) |
- return html |
+ def _BuildScoreTableCell(self, score_name, test_data_gen, |
+ test_data_gen_params, apm_config): |
+ """Builds the content of a table cell for a score table.""" |
+ scores = self._SliceDataForScoreTableCell( |
+ score_name, apm_config, test_data_gen, test_data_gen_params) |
+ stats = self._ComputeScoreStats(scores) |
+ |
+ html = [] |
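+    # Unique element ids are needed to bind each MDL tooltip to the <div> it
+    # describes; they are derived below by hashing the cell coordinates.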
+    items_id_prefix = (
+        score_name + test_data_gen + test_data_gen_params + apm_config)
+    if stats['count'] == 1:
+      # Show the only available score.
+      item_id = hashlib.md5(items_id_prefix).hexdigest()
+      html.append('<div id="single-value-{0}">{1:f}</div>'.format(
+          item_id, scores['score'].mean()))
+      html.append('<div class="mdl-tooltip" data-mdl-for="single-value-{}">{}'
+                  '</div>'.format(item_id, 'single value'))
+    else:
+      # Show stats.
+      for stat_name in ['min', 'max', 'mean', 'std dev']:
+        item_id = hashlib.md5(items_id_prefix + stat_name).hexdigest()
+        html.append('<div id="stats-{0}">{1:f}</div>'.format(
+            item_id, stats[stat_name]))
+        html.append('<div class="mdl-tooltip" data-mdl-for="stats-{}">{}'
+                    '</div>'.format(item_id, stat_name))
-  def _BuildTableHeader(self, score_name, input_names):
-    """Builds the cells of a table header.
+    return self._NEW_LINE.join(html)
-    A table header starts with a cell containing the name of the evaluation
-    score, and then it includes one column for each probing signal.
+  def _BuildScoreStatsInspectorDialogs(
+      self, score_name, apm_configs, test_data_gen_configs):
+    """Builds a set of score stats inspector dialogs."""
+    html = []
+    for apm_config in apm_configs:
+      for test_data_gen_info in test_data_gen_configs:
+        dialog_id = self._ScoreStatsInspectorDialogId(
+            score_name, apm_config[0],
+            test_data_gen_info[0], test_data_gen_info[1])
+
+        html.append('<dialog class="mdl-dialog" id="{}" '
+                    'style="width: 40%;">'.format(dialog_id))
+
+        # Content.
+        html.append('<div class="mdl-dialog__content">')
+        html.append('<h6><strong>APM config preset</strong>: {}<br/>'
+                    '<strong>Test data generator</strong>: {} ({})</h6>'.format(
+                        self._FormatName(apm_config[0]),
+                        self._FormatName(test_data_gen_info[0]),
+                        test_data_gen_info[1]))
+        html.append(self._BuildScoreStatsInspectorDialog(
+            score_name, apm_config[0], test_data_gen_info[0],
+            test_data_gen_info[1]))
+        html.append('</div>')
+
+        # Actions.
+        html.append('<div class="mdl-dialog__actions">')
+        html.append('<button type="button" class="mdl-button" '
+                    'onclick="closeScoreStatsInspector(\'' + dialog_id + '\')">'
+                    'Close</button>')
+        html.append('</div>')
+
+        html.append('</dialog>')
-    Args:
-      score_name: name of the score.
-      input_names: list of probing signal names.
+    return self._NEW_LINE.join(html)
-    Returns:
-      A string with a list of <th>...</th> HTML elements.
-    """
-    html = (
-        '<th>{}</th>'.format(self._FormatName(score_name)) +
-        '<th>' + '</th><th>'.join(
-            [self._FormatName(name) for name in input_names]) + '</th>')
-    return html
+  def _BuildScoreStatsInspectorDialog(
+      self, score_name, apm_config, test_data_gen, test_data_gen_params):
+    """Builds one score stats inspector dialog."""
+    scores = self._SliceDataForScoreTableCell(
+        score_name, apm_config, test_data_gen, test_data_gen_params)
+
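+    # The dialog table shows one row per capture-render pair and one column
+    # per echo simulator.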
+    capture_render_pairs = sorted(self._FindUniqueTuples(
+        scores, ['capture', 'render']))
+    echo_simulators = sorted(self._FindUniqueTuples(scores, ['echo_simulator']))
+
+    html = ['<table class="mdl-data-table mdl-js-data-table mdl-shadow--2dp">']
+
+    # Header.
+    html.append('<thead><tr><th>Capture-Render / Echo simulator</th>')
+    for echo_simulator in echo_simulators:
+      html.append('<th>' + self._FormatName(echo_simulator[0]) + '</th>')
+    html.append('</tr></thead>')
+
+    # Body.
+    html.append('<tbody>')
+    for capture, render in capture_render_pairs:
+      html.append('<tr><td><div>{}</div><div>{}</div></td>'.format(
+          capture, render))
+      for echo_simulator in echo_simulators:
+        score_tuple = self._SliceDataForScoreStatsTableCell(
+            scores, capture, render, echo_simulator[0])
+        html.append('<td class="single-score-cell">{}</td>'.format(
+            self._BuildScoreStatsInspectorTableCell(score_tuple)))
+      html.append('</tr>')
+    html.append('</tbody>')
+
+    html.append('</table>')
+
+    # Placeholder for the audio inspector.
+    html.append('<div class="audio-inspector-placeholder"></div>')
-  def _BuildTableRow(self, score_name, config_name, scores, input_names):
-    """Builds the cells of a table row.
+    return self._NEW_LINE.join(html)
-    A table row starts with the name of the APM configuration file, and then it
-    includes one column for each probing singal.
+  def _BuildScoreStatsInspectorTableCell(self, score_tuple):
+    """Builds the content of a cell of a score stats inspector."""
+    html = ['<div>{}</div>'.format(score_tuple.score)]
-    Args:
-      score_name: name of the score.
-      config_name: name of the APM configuration.
-      scores: nested dictionary of scores.
-      input_names: list of probing signal names.
+    # Add all the available file paths as hidden data.
+    for field_name in score_tuple.keys():
+      if field_name.endswith('_filepath'):
+        html.append('<input type="hidden" name="{}" value="{}">'.format(
+            field_name, score_tuple[field_name]))
-    Returns:
-      A string with a list of <td>...</td> HTML elements.
-    """
-    cells = [self._BuildTableCell(
-        scores[input_name], score_name, config_name, input_name) for (
-            input_name) in input_names]
-    html = ('<td>{}</td>'.format(self._FormatName(config_name)) +
-            '<td>' + '</td><td>'.join(cells) + '</td>')
-    return html
+    return self._NEW_LINE.join(html)
-  def _BuildTableCell(self, scores, score_name, config_name, input_name):
-    """Builds the inner content of a table cell.
+  def _SliceDataForScoreTableCell(
+      self, score_name, apm_config, test_data_gen, test_data_gen_params):
+ """Slices |self._scores_data_frame| to extract the data for a tab.""" |
+    masks = []
+    masks.append(self._scores_data_frame.eval_score_name == score_name)
+    masks.append(self._scores_data_frame.apm_config == apm_config)
+    masks.append(self._scores_data_frame.test_data_gen == test_data_gen)
+    masks.append(
+        self._scores_data_frame.test_data_gen_params == test_data_gen_params)
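+    # Combine the masks with a logical AND to select only the matching rows.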
+    mask = reduce((lambda i1, i2: i1 & i2), masks)
+    del masks
+    return self._scores_data_frame[mask]
-    A table cell includes all the scores computed for a specific evaluation
-    score (e.g., POLQA), APM configuration (e.g., default), and probing signal.
+  @classmethod
+  def _SliceDataForScoreStatsTableCell(
+      cls, scores, capture, render, echo_simulator):
+ """Slices |scores| to extract the data for a tab.""" |
+    masks = []
-    Args:
-      scores: dictionary of score data.
-      score_name: name of the score.
-      config_name: name of the APM configuration.
-      input_name: name of the probing signal.
+    masks.append(scores.capture == capture)
+    masks.append(scores.render == render)
+    masks.append(scores.echo_simulator == echo_simulator)
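+    # Select the single row matching capture, render and echo simulator.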
+    mask = reduce((lambda i1, i2: i1 & i2), masks)
+    del masks
-    Returns:
-      A string with the HTML of a table body cell.
-    """
-    # Init test data generator names and parameters cache (if not done).
-    if self._test_data_generator_names is None:
-      self._test_data_generator_names = sorted(scores.keys())
-      self._test_data_generator_params = {test_data_generator_name: sorted(
-          scores[test_data_generator_name].keys()) for (
-              test_data_generator_name) in self._test_data_generator_names}
-
-    # For each noisy input (that is a pair of test data generator and
-    # generator parameters), add an item with the score and its metadata.
-    items = []
-    for name_index, test_data_generator_name in enumerate(
-        self._test_data_generator_names):
-      for params_index, test_data_generator_params in enumerate(
-          self._test_data_generator_params[test_data_generator_name]):
-
-        # Init.
-        score_value = '?'
-        metadata = ''
-
-        # Extract score value and its metadata.
-        try:
-          data = scores[test_data_generator_name][test_data_generator_params]
-          score_value = '{0:f}'.format(data['score'])
-          metadata = (
-              '<input type="hidden" name="gen_name" value="{}"/>'
-              '<input type="hidden" name="gen_params" value="{}"/>'
-              '<input type="hidden" name="audio_in" value="file://{}"/>'
-              '<input type="hidden" name="audio_out" value="file://{}"/>'
-              '<input type="hidden" name="audio_ref" value="file://{}"/>'
-          ).format(
-              test_data_generator_name,
-              test_data_generator_params,
-              data['audio_in_filepath'],
-              data['audio_out_filepath'],
-              data['audio_ref_filepath'])
-        except TypeError:
-          logging.warning(
-              'missing score found: <score:%s> <config:%s> <input:%s> '
-              '<generator:%s> <params:%s>', score_name, config_name, input_name,
-              test_data_generator_name, test_data_generator_params)
-
-        # Add the score.
-        items.append(
-            '<div class="test-data-gen-desc">[{0:d}, {1:d}]{2}</div>'
-            '<div class="value">{3}</div>'.format(
-                name_index, params_index, metadata, score_value))
-
-    html = (
-        '<div class="score">' +
-        '</div><div class="score">'.join(items) +
-        '</div>')
-
-    return html
-
-  def _BuildLegend(self):
-    """Builds the legend.
-
-    The legend details test data generator name and parameter pairs.
+    sliced_data = scores[mask]
+    assert len(sliced_data) == 1, 'single score is expected'
+    return sliced_data.iloc[0]
-    Returns:
-      A string with a <div class="legend">...</div> HTML element.
-    """
-    items = []
-    for name_index, test_data_generator_name in enumerate(
-        self._test_data_generator_names):
-      for params_index, test_data_generator_params in enumerate(
-          self._test_data_generator_params[test_data_generator_name]):
-        items.append(
-            '<div class="test-data-gen-desc">[{0:d}, {1:d}]</div>: {2}, '
-            '{3}'.format(name_index, params_index, test_data_generator_name,
-                         test_data_generator_params))
-    html = (
-        '<div class="legend"><div>' +
-        '</div><div>'.join(items) + '</div></div>')
-
-    return html
+  @classmethod
+  def _FindUniqueTuples(cls, data_frame, fields):
+ """Slices |data_frame| to a list of fields and finds unique tuples.""" |
+    return data_frame[fields].drop_duplicates().values.tolist()
+
+  @classmethod
+  def _ComputeScoreStats(cls, data_frame):
+    """Computes score stats."""
+    scores = data_frame['score']
+    return {
+        'count': scores.count(),
+        'min': scores.min(),
+        'max': scores.max(),
+        'mean': scores.mean(),
+        'std dev': scores.std(),
+    }
+
+  @classmethod
+  def _ScoreStatsInspectorDialogId(cls, score_name, apm_config, test_data_gen,
+                                   test_data_gen_params):
+ """Assigns a unique name to a dialog.""" |
+    return 'score-stats-dialog-' + hashlib.md5(
+        'score-stats-inspector-{}-{}-{}-{}'.format(
+            score_name, apm_config, test_data_gen,
+            test_data_gen_params)).hexdigest()
   @classmethod
   def _Save(cls, output_filepath, html):