#!python
# PYTHON_ARGCOMPLETE_OK
#
# Copyright (c) 2020 Björn Gottschall
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.


import argparse
import argcomplete
import tempfile
import pandas
import numpy
import os
import io
import gc
import subprocess
import sys
import pickle
import copy
import textwrap
import shutil
import xopen
import seaborn
import plotly.figure_factory
import scipy.cluster.hierarchy
import scipy.spatial.distance
import sklearn.cluster
import multiprocessing

# import warnings
# warnings.filterwarnings('error')

# FUTURE: pyarrow
# import csv.Sniffer

# Tool metadata: program name, release version and project homepage.
__prog__ = 'plotgen'
__version__ = '0.7.18'
__url__ = 'https://github.com/bgottschall/plotgen'


# All options that are forwarded to the plotting script will be filtered through here
def escapedStr(val: str):
    """Backslash-escape single quotes, double quotes and backslashes in val.

    Done in a single translate() pass so already-escaped characters are not
    escaped twice.
    """
    table = {
        ord("'"): r"\'",
        ord("\\"): r"\\",
        ord('"'): r"\"",
    }
    return val.translate(table)


def isFloat(val):
    """Return True if val can be interpreted as a float.

    Lists are float-like when every element is; dicts when every value is.
    None is never float-like. Objects that float() cannot handle at all
    (TypeError, e.g. sets or arbitrary instances) now report False instead of
    raising.
    """
    if isinstance(val, dict):
        return all(isFloat(x) for x in val.values())
    if isinstance(val, list):
        return all(isFloat(x) for x in val)
    if val is None:
        return False
    try:
        float(val)
        return True
    except (ValueError, TypeError):
        # TypeError covers non-castable objects; previously this escaped and
        # crashed the caller.
        return False


class OrderedAction(argparse.Action):
    """argparse action that delegates storage to a registered sub-action and
    additionally records the order in which options appeared on the command
    line (namespace.ordered_args, a list of (dest, value) tuples)."""

    def __init__(self, *args, ordered=True, sub_action='store', **kwargs):
        super().__init__(*args, **kwargs)
        self.action = sub_action
        self.ordered = ordered

    def __call__(self, parser, namespace, values, option_string=None):
        # Let the real ('store', 'append', ...) action do the storing.
        delegateClass = parser._registry_get('action', self.action, self.action)
        delegateClass(self.option_strings, self.dest)(parser, namespace, values, option_string)
        # ordered_args always exists once any OrderedAction fired.
        if 'ordered_args' not in namespace:
            setattr(namespace, 'ordered_args', [])
        if not self.ordered:
            return
        history = namespace.ordered_args
        if self.action == 'append':
            # Appending options keep one entry that is refreshed in place
            # (NOTE: an entry is only refreshed, never created, here).
            for pos, (key, _) in enumerate(history):
                if key == self.dest:
                    history[pos] = (key, getattr(namespace, self.dest))
                    break
        else:
            history.append((self.dest, getattr(namespace, self.dest)))
        setattr(namespace, 'ordered_args', history)


class ParentAction(argparse.Action):
    """argparse action collecting one entry per occurrence; every entry carries
    its own child namespace pre-filled with the defaults of all registered
    ChildActions."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, default=[], **kwargs)
        self.children = []

    def __call__(self, parser, namespace, values, option_string=None):
        collected = getattr(namespace, self.dest)
        childNamespace = type(namespace)()
        for child in self.children:
            # Non-sticky children inherit the last explicitly given value.
            adjustable = (not child.sticky_default) and child.name in ChildAction._adjusting_defaults
            defaultValue = ChildAction._adjusting_defaults[child.name] if adjustable else child.default
            setattr(childNamespace, child.name, defaultValue)
        if self.children:
            setattr(childNamespace, 'ordered_args', [])
        collected.append({'value': values, 'args': childNamespace})


class ChildAction(argparse.Action):
    """argparse action whose value is stored on the namespace of the most
    recent ParentAction occurrence instead of the top-level namespace."""

    # Last explicitly given value per child option; later parents pick these up
    # as their (non-sticky) defaults.
    _adjusting_defaults = {}

    def __init__(self, *args, parent, sub_action='store', sticky_default=False, ordered=True, **kwargs):
        super().__init__(*args, **kwargs)
        # Store into the parent's list; keep the child's own dest as 'name'.
        self.dest, self.name = parent.dest, self.dest
        self.sticky_default = sticky_default

        self.sub_action = sub_action
        self.action = OrderedAction
        self.ordered = ordered

        self.parent = parent
        parent.children.append(self)

    def __call__(self, parser, namespace, values, option_string=None):
        # A store_true sub-action receives no value, so record the flag as True.
        # (Fix: self.action is always the OrderedAction class, so the original
        # comparison `self.action == 'store_true'` could never be true; the
        # sub-action must be checked instead.)
        ChildAction._adjusting_defaults[self.name] = True if self.sub_action == 'store_true' else values
        items = getattr(namespace, self.dest)
        try:
            lastParentNamespace = items[-1]['args']
        except Exception:
            # No parent option has been seen yet.
            if (self.sticky_default):
                raise Exception(f'parameter --{self.name} can only be used after --{self.parent.dest}!') from None
            return
        _action = parser._registry_get('action', self.action, self.action)(self.option_strings, self.name, sub_action=self.sub_action, ordered=self.ordered)
        _action(parser, lastParentNamespace, values, option_string)


class Range(object):
    """Numeric interval with optionally open/closed bounds plus an optional
    list of literal fallback values; membership is tested via == or `in`
    (used as an argparse choices object, hence __iter__ yielding itself)."""

    def __init__(self, start=None, end=None, orValues=None, start_inclusive=True, end_inclusive=True):
        badStart = start is not None and not isFloat(start)
        badEnd = end is not None and not isFloat(end)
        badOr = orValues is not None and not isinstance(orValues, list)
        if badStart or badEnd or badOr:
            raise Exception('invalid use of range object!')
        self.start_inclusive = start_inclusive
        self.end_inclusive = end_inclusive
        self.start = start
        self.end = end
        self.orValues = orValues

    def _aboveStart(self, value):
        # Respect the configured inclusivity of the lower bound.
        return self.start <= value if self.start_inclusive else self.start < value

    def _belowEnd(self, value):
        # Respect the configured inclusivity of the upper bound.
        return value <= self.end if self.end_inclusive else value < self.end

    def __eq__(self, other):
        matched = False
        if isFloat(other):
            other = float(other)
            if self.start is None and self.end is None:
                matched = True
            elif self.start is not None and self.end is not None:
                matched = self._aboveStart(other) and self._belowEnd(other)
            elif self.start is not None:
                matched = self._aboveStart(other)
            else:
                matched = self._belowEnd(other)
        if not matched and self.orValues is not None:
            matched = other in self.orValues
        return matched

    def __contains__(self, item):
        return self == item

    def __iter__(self):
        # Behave as a one-element iterable containing itself.
        yield self

    def __repr__(self):
        return str(self)

    def __str__(self):
        if self.start is None and self.end is None:
            bounds = '-inf - +inf'
        elif self.start is not None and self.end is not None:
            bounds = f'{self.start} - {self.end}'
        elif self.start is not None:
            bounds = f'{self.start} - +inf'
        else:
            bounds = f'-inf - {self.end}'
        if self.orValues is not None and len(self.orValues) > 0:
            bounds += ', or ' + ', '.join(self.orValues)
        return bounds


# Symbolic slice names understood on the command line.
defaultSliceTypeTranslator = {'all': slice(None)}


def SliceType(translator=defaultSliceTypeTranslator):
    """Return a parser that turns a string into an int, a slice, or a
    translated keyword ('all' -> slice(None) by default)."""
    def parse(text):
        if text in translator:
            return translator[text]
        try:
            return int(text)
        except ValueError:
            # 'a:b[:c]' notation; empty sections mean None (open end).
            sections = [int(part) if part else None for part in text.split(':')]
            if len(sections) > 3:
                raise ValueError(f'{text} is not a valid slice notation')
            return slice(*sections)
    return parse


def isSliceType(value, translator=defaultSliceTypeTranslator):
    """True when value parses cleanly through SliceType with this translator."""
    if value is None:
        return False
    try:
        SliceType(translator)(value)
    except Exception:
        return False
    return True


def getSliceTypeIds(targetRange: range, slices: list):
    """Expand a list of ints/slices over targetRange into a flat list of the
    selected values; out-of-range integers are silently dropped."""
    validInts = range(-len(targetRange), len(targetRange))
    selected = []
    for spec in slices:
        if isinstance(spec, slice):
            selected.extend(targetRange[spec])
        elif spec in validInts:
            selected.append(targetRange[spec])
    return selected


def getSliceTypesFromList(target: list, match: list, matchMode='all'):
    """Map entries of match onto indices in target.

    Entries found in target resolve to their index/indices; matchMode selects
    which duplicates to keep ('first', 'last' or anything else for all).
    Entries not found but parseable as slice notation are passed through as
    parsed slices/ints; anything else is silently skipped.

    Fix: the body referenced an undefined name `selectMode` (NameError on any
    successful match) — the parameter is `matchMode`.
    """
    resolved = []
    for entry in match:
        if entry not in target:
            if isSliceType(entry):
                resolved.append([SliceType()(entry)])
            continue
        if matchMode == 'first':
            resolved.append([target.index(entry)])
        elif matchMode == 'last':
            resolved.append([len(target) - target[-1::-1].index(entry) - 1])
        else:
            resolved.append([i for i, v in enumerate(target) if v == entry])
    return [x for group in resolved for x in group]


def updateRange(_range, dataList):
    """Widen a mutable list of {'min': .., 'max': ..} dicts in place.

    _range grows to len(dataList); each entry's min/max is widened by the
    numeric values found at the matching position of dataList (scalars or
    lists; non-numeric values and None entries are ignored).

    Fix: error messages said "directories" where "dictionaries" was meant.
    """
    if not isinstance(_range, list):
        raise Exception('updateRange needs a mutable list of min/max dictionaries')
    for entry in _range:
        if not isinstance(entry, dict):
            raise Exception('updateRange needs a mutable list of dictionaries')
        entry.setdefault('min', None)
        entry.setdefault('max', None)
    while len(_range) < len(dataList):
        _range.append({'min': None, 'max': None})
    for index, data in enumerate(dataList):
        if data is None:
            continue
        if not isinstance(data, list):
            data = [data]
        scope = [float(x) for x in data if isFloat(x)]
        if scope:
            low, high = min(scope), max(scope)
            entry = _range[index]
            entry['min'] = low if entry['min'] is None else min(entry['min'], low)
            entry['max'] = high if entry['max'] is None else max(entry['max'], high)

# https://stackoverflow.com/questions/21844024/weighted-percentile-using-numpy
def weightedQuantiles(values, quantiles, sampleWeight=None):
    """Compute weighted quantiles of values (unweighted when sampleWeight is
    None). Zero-weight samples are discarded; an all-zero weight vector yields
    a zero result for every requested quantile."""
    values = numpy.array(values)
    quantiles = numpy.array(quantiles)
    weights = numpy.ones(len(values)) if sampleWeight is None else numpy.array(sampleWeight)

    nonzero = numpy.where(weights != 0)
    values = values[nonzero]
    weights = weights[nonzero]

    if numpy.sum(weights) == 0:
        return numpy.zeros(len(quantiles))

    order = numpy.argsort(values)
    values = values[order]
    weights = weights[order]

    # Midpoint rule: each sample sits at the centre of its weight interval.
    positions = numpy.cumsum(weights) - 0.5 * weights
    positions /= numpy.sum(weights)
    return numpy.interp(quantiles, positions, values)

# https://stackoverflow.com/questions/2413522/weighted-standard-deviation-in-numpy
def weightedMeanStd(values, weights=None):
    """Return (weighted mean, weighted standard deviation) of values;
    unweighted when weights is None, and (0, 0) when all weights are zero."""
    values = numpy.array(values)
    weights = numpy.ones(len(values)) if weights is None else numpy.array(weights)
    if numpy.sum(weights) == 0:
        return (0, 0)
    mean = numpy.average(values, weights=weights)
    variance = numpy.average((values - mean) ** 2, weights=weights)
    return (mean, numpy.sqrt(variance))

class DataframeActions:

    def considerAsNaN(value):
      return str(value).lower() in ['nan', 'none', 'null', 'zero', 'nodata']

    # Before plotting index and columns MUST BE string, for plotting those can be anything
    # all other data always tries to be float if not enforced otherwise. Any data from a csv
    # is always read in as float if possible
    # Cell-level converters used by apply/transform options. NaN markers (see
    # considerAsNaN) and unparseable values fall through untouched unless the
    # 'force*' variants are used, which coerce or blank them.
    transformers = {
      'identity': lambda x: x,
      'str' : str,
      # int/float: convert when parseable, keep NaN markers as NaN, pass anything else through.
      'int' : lambda x: numpy.nan if __class__.considerAsNaN(x) or (isFloat(x) and numpy.isnan(float(x))) else int(float(x)) if isFloat(x) else x,
      'float': lambda x: numpy.nan if __class__.considerAsNaN(x) or (isFloat(x) and numpy.isnan(float(x))) else float(x) if isFloat(x) else x,
      # index: like 'mixed' but stringified, for index/column labels (e.g. 2.0 -> '2').
      'index': lambda x: numpy.nan if __class__.considerAsNaN(x) or (isFloat(x) and numpy.isnan(float(x))) else str((float(x) if numpy.floor(float(x)).astype(int) != float(x) else numpy.floor(float(x)).astype(int)) if isFloat(x) else x),
      # force*: non-parseable values become NaN instead of passing through.
      'forcefloat': lambda x: numpy.nan if not isFloat(x) else float(x),
      'forceint': lambda x: numpy.nan if not isFloat(x) or numpy.isnan(float(x)) else int(float(x)),
      'mixed': lambda x: numpy.nan if __class__.considerAsNaN(x) or (isFloat(x) and numpy.isnan(float(x))) else (float(x) if numpy.floor(float(x)).astype(int) != float(x) else numpy.floor(float(x)).astype(int)) if isFloat(x) else str(x), # nan if nan, float if float, int if int else str
    }


    # Groups of function names accepted by the apply/normalise options; used to
    # dispatch inside applyOnColumns/normaliseColumnIds.
    functions = {
      # Applied element-wise to the selected columns themselves (pandas/numpy).
      'pandasSelfFunctions': ['abs', 'cumsum', 'cummax', 'cummin', 'cumprod', 'rank', 'diff', 'round', 'floor', 'ceil', 'str', 'int', 'float'],
      # Self-applied functions with custom handling in applyOnColumns.
      'specialSelfFunctions': ['set', 'cset', 'polyfit', 'append' , 'prepend', 'ljust', 'rjust', 'cjust', 'kmeans'],
      # Binary computations between apply and target columns (or with a scalar parameter).
      'amidComputeFunctions': ['add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow', 'radd', 'rsub', 'rmul', 'rdiv', 'rtruediv', 'rfloordiv', 'rmod', 'rpow', 'lt', 'gt', 'le', 'ge', 'ne', 'eq', 'min', 'max'],
      'onlyAmidComputeFunctions' : ['concat', 'copy', 'kmeans_labels'],
      # Aggregations reducing a column to a single value (also valid for normalise).
      'soloComputeFunctions': ['sum', 'mean', 'median', 'std', 'var', 'count', 'skew', 'mad', 'min', 'max'],
      'specialGroupFunctions' : ['kmeans'],
      # Functions that do not require numeric input.
      'nonComputeFunctions': ['set', 'cset', 'append', 'prepend', 'ljust', 'rjust', 'cjust', 'concat', 'copy', 'kmeans_labels'] + list(transformers.keys())
    }

    def parseKMeansParameters(parameters: list):
      res = { 'n_clusters' : 8, 'init': 'k-means++', 'n_init': 'auto', 'max_iter': 300, 'tol': 1e-4, 'algorithm': 'lloyd' , 'samples': 'yes'}
      parameters = [str(p) for p in parameters]
      if len(parameters) > 0:
        if parameters[0].isnumeric():
          res['n_clusters'] = parameters[0]
          parameters = parameters[1:]
        for p in parameters:
          spl = p.split('=', 1)
          if len(spl) != 2 or spl[0] not in res.keys():
            raise Exception(f"Unknown KMeans parameter {p}, choose one of {', '.join(res.keys())}")
          res[spl[0]] = spl[1]
      for intk in ['n_clusters', 'max_iter']:
        res[intk] = int(res[intk])
      for floatk in ['tol']:
        res[floatk] = float(res[floatk])
      res['n_init'] = 'auto' if res['n_init'] == 'auto' else int(res['n_init'])
      res['samples'] = True if res['samples'].lower() in ['yes', 'true', '1'] else False
      return res

    # Condition names accepted by filter options; dispatched in filterFunction.
    conditions = ['=', '!=', '<', '>', '<=', '>=', 'in', 'notin', 'xor', 'nor', 'and', 'nand']

    def filterFunction(frame, data, mode, quiet=False):
        filterMap = {
            '=': lambda x: any((pandas.isna(x) and pandas.isna(y)) or (float(x) == float(y) if isFloat([x, y]) else str(x) == str(y)) for y in data),
            '!=': lambda x: all(((pandas.isna(x) != pandas.isna(y)) if (pandas.isna(x) or pandas.isna(y)) else (float(x) != float(y) if isFloat([x, y]) else str(x) != str(y))) for y in data),
            '<': lambda x: any(float(x) < float(y) if isFloat([x, y]) else str(x) < str(y) for y in data),
            '>': lambda x: any(float(x) > float(y) if isFloat([x, y]) else str(x) > str(y) for y in data),
            '<=': lambda x: any(((pandas.isna(x) and pandas.isna(y)) or float(x) <= float(y) if isFloat([x, y]) else str(x) <= str(y)) for y in data),
            '>=': lambda x: any(((pandas.isna(x) and pandas.isna(y)) or float(x) >= float(y) if isFloat([x, y]) else str(x) >= str(y)) for y in data),
            'in': lambda x: x in data,
            'notin': lambda x: x not in data,
            'xor': lambda x: all(bool(int(y) ^ int(x)) if str(x).isdigit and str(y).isdigit else False for y in data),
            'nor': lambda x: all(not bool(int(y) ^ int(x)) if str(x).isdigit and str(y).isdigit else False for y in data),
            'and': lambda x: all(bool(int(y) & int(x)) if str(x).isdigit and str(y).isdigit else False for y in data),
            'nand': lambda x: all(not bool(int(y) & int(x)) if str(x).isdigit and str(y).isdigit else False for y in data),
        }
        if mode not in filterMap:
            if not quiet:
                raise Exception(f'ERROR: could not find condition {mode}')
        else:
            return frame.apply(filterMap[mode])

    def transpose(dataframe):
      return dataframe.transpose()

    def dropNaN(dataframe, dropAny=False):
      return dataframe.dropna(how='any' if dropAny else 'all', axis=0).dropna(how='any' if dropAny else 'all', axis=1)

    def sliceToColumnIds(dataframe, slices):
        """Resolve int/slice specs to concrete column positions of dataframe."""
        specs = slices if isinstance(slices, list) else [slices]
        return getSliceTypeIds(range(dataframe.shape[1]), specs)

    def sliceToRowIds(dataframe, slices):
        """Resolve int/slice specs to concrete row positions of dataframe."""
        specs = slices if isinstance(slices, list) else [slices]
        return getSliceTypeIds(range(dataframe.shape[0]), specs)

    def dropColumnsByIds(dataframe, colIds):
      filterColumns = numpy.array([False if i in colIds else True for i in range(dataframe.shape[1])])
      return dataframe.loc[:, filterColumns]

    def selectColumnsByIds(dataframe, colIds):
      return dataframe.iloc[:, colIds]

    def dropRowsByIds(dataframe, rowIds):
      filterRows = numpy.array([False if i in rowIds else True for i in range(dataframe.shape[0])])
      return dataframe.iloc[filterRows, :]

    def selectRowsByIds(dataframe, rowIds):
      return dataframe.iloc[rowIds, :]

    def filterRowsByColumnData(dataframe, columnIds, data):
        """Keep rows where every listed column satisfies the condition.

        data[0] names the condition (see conditions); the remaining entries are
        the comparison operands, floated where possible.
        """
        condition = data[0]
        operands = [float(x) if isFloat(x) else x for x in data[1:]]
        for columnId in columnIds:
            matches = __class__.filterFunction(dataframe.iloc[:, columnId], operands, condition)
            keep = [i for i, flag in enumerate(matches) if flag]
            dataframe = dataframe.iloc[keep, :]
        return dataframe

    def filterColumnsByRowData(dataframe, rowIds, data):
        """Column-wise counterpart of filterRowsByColumnData (via transpose)."""
        filtered = __class__.filterRowsByColumnData(dataframe.T, rowIds, data)
        return filtered.T

    def getColumnIds(dataframe, columns, mode='all', ignore_errors=False):
        """Resolve column names (or slice notation) to positional ids.

        Names are compared both literally and through float normalisation
        (so '1' also matches '1.0'); mode selects which duplicates to keep
        ('first', 'last' or all). Unknown names fall back to slice parsing,
        otherwise an exception is raised unless ignore_errors.
        """
        if not isinstance(columns, list):
            columns = [columns]
        available = list(enumerate(map(str, dataframe.columns.tolist())))
        columnIds = []
        for column in columns:
            name = str(column)
            floatName = str(float(column)) if isFloat(column) else name
            found = [i for i, candidate in available if candidate == name or candidate == floatName]
            if found:
                if mode == 'first':
                    columnIds.append(found[0])
                elif mode == 'last':
                    columnIds.append(found[-1])
                else:
                    columnIds.extend(found)
            elif isSliceType(name):
                columnIds.extend(__class__.sliceToColumnIds(dataframe, SliceType()(name)))
            elif not ignore_errors:
                raise Exception(f'Could not find row/column name {name}')
        return columnIds

    def getRowIds(dataframe, rows, mode='all', ignore_errors=False):
        """Row counterpart of getColumnIds (resolved on the transpose)."""
        transposed = dataframe.T
        return __class__.getColumnIds(transposed, rows, mode, ignore_errors)

    def transformColumnIds(dataframe, columnIds, transformer):
      """Apply transformer element-wise to the given column ids and/or index.

      The special id 'index' maps the transformer over the frame's index
      (mutating the passed frame's index in place).

      NOTE(review): when real column ids are given, the returned frame contains
      ONLY those transformed columns (the rest are dropped) — surprising for a
      'transform' helper; confirm callers rely on this selection behavior.
      """
      if not isinstance(columnIds, list):
        columnIds = [columnIds]
      if 'index' in columnIds:
        dataframe.index = dataframe.index.map(transformer)
        columnIds = [c for c in columnIds if c != 'index']
      if len(columnIds) > 0:
        dataframe = dataframe.iloc[:, columnIds].applymap(transformer)
      return dataframe

    def transformRowIds(dataframe, rowIds, transformer):
      return __class__.transformColumnIds(dataframe.T, ['index' if r == 'columns' else r for r in rowIds], transformer).T

    def dropIndex(dataframe):
      _name = dataframe.index.name
      dataframe.reset_index(drop=True, inplace=True)
      dataframe.index.name = _name
      return dataframe

    def dropColumns(dataframe):
      return __class__.dropIndex(dataframe.T).T

    # By default all data in the frame tries to be float
    def resetIndex(dataframe, transformer = None):
      dataframe = __class__.transformColumnIds(dataframe, ['index'], __class__.transformers['float'] if transformer is None else transformer)
      dataframe.reset_index(inplace=True)
      return dataframe

    def resetColumns(dataframe, transformer = None):
      return __class__.resetIndex(dataframe.T, transformer).T

    # By default all data in index/columns IS string
    def setIndexColumnByIdx(dataframe, colIdx, drop = False, transformer = None):
      dataframe = __class__.transformColumnIds(dataframe.set_index(pandas.Index(dataframe.iloc[:, colIdx], name=dataframe.index.name)), ['index'], __class__.transformers['index'] if transformer is None else transformer)
      return dataframe if not drop else __class__.dropColumnsByIds(dataframe, [colIdx])

    def setColumnsRowByIdx(dataframe, rowIdx, drop = False, transformer = None):
      return __class__.setIndexColumnByIdx(dataframe.T, rowIdx, drop, transformer).T

    def renameColumns(dataframe, columnIds, names):
        """Rename columns (or the index, via id 'index') in place.

        Names are normalised through the 'index' transformer; surplus ids or
        names beyond the shorter of the two lists are ignored.
        """
        names = list(map(__class__.transformers['index'], names))
        labels = dataframe.columns.tolist()
        axisName = dataframe.columns.name
        for position, columnId in enumerate(columnIds):
            if position >= len(names) or position >= len(dataframe.columns):
                break
            if columnId == 'index':
                dataframe.index.name = names[position]
            else:
                labels[columnId] = names[position]
        dataframe.columns = labels
        dataframe.columns.name = axisName
        return dataframe

    def renameRows(dataframe, rowIds, names):
        """Row counterpart of renameColumns ('columns' renames the column axis)."""
        translated = ['index' if x == 'columns' else x for x in rowIds]
        return __class__.renameColumns(dataframe.T, translated, names).T

    def sortRows(dataframe, function=['none'], order='asc'):
      return __class__.sortColumns(dataframe.T, function, order).T

    def sortColumns(dataframe, function=['none'], order='asc'):
        functionName = function[0]
        if functionName == 'none':
            sortKey = dataframe.reset_index(drop=True).T.reset_index(drop=True).T
            sortKey = sortKey.sort_values(by=sortKey.index.tolist(), axis=1, ascending=(order == 'asc')).T
        else:
            sortKey = getattr(dataframe, functionName)(axis=0).reset_index(drop=True).sort_values(ascending=(order == 'asc'))
        return dataframe.iloc[:, sortKey.index]

    def reverseColumns(dataframe):
        return dataframe.iloc[::, ::-1]

    def reverseRows(dataframe):
        return dataframe.iloc[::-1]

    def sortColumnsByRowIds(dataframe, rowIds, function=['none'], order='asc', quiet=False):
      rowIds = rowIds if isinstance(rowIds, list) else [rowIds]
      rowIds = ['index' if x == 'columns' else x for x in rowIds]
      return __class__.sortRowsByColumnIds(dataframe.T, rowIds, function, order, quiet).T

    def sortRowsByColumnIds(dataframe, colIds, function=['none'], order='asc', quiet=False):
        """Sort rows by the values of the given columns (or by the index via
        id 'index'), optionally reduced with an aggregate function first.

        quiet is accepted for signature symmetry with its callers but unused here.
        """
        functionName = function[0]
        if not isinstance(colIds, list):
            colIds = [colIds]
        # A single Series key results from sorting on the index or an aggregate.
        sortSeries = 'index' in colIds or functionName != 'none'
        if 'index' in colIds:
            sortKey = pandas.to_numeric(dataframe.index, errors="ignore").to_series()
        else:
            sortKey = dataframe.iloc[:, colIds].apply(pandas.to_numeric, errors="ignore")
            if functionName != 'none':
                sortKey = getattr(sortKey, functionName)(axis=1, numeric_only=True)

        # The positional index of the sorted key yields the new row order.
        sortKey.reset_index(drop=True, inplace=True)
        if sortSeries:
            sortKey.sort_values(ascending=(order == 'asc'), inplace=True)
        else:
            sortKey.columns = range(sortKey.shape[1])
            sortKey.sort_values(by=sortKey.columns.to_list(), axis=0, ascending=(order == 'asc'), inplace=True)

        return dataframe.iloc[sortKey.index, :]

    def addConstant(dataframe, constant):
        return dataframe + constant

    def scaleConstant(dataframe, constant):
        return dataframe * constant

    def normaliseToConstant(dataframe, constant):
        return dataframe / constant

    def normaliseRowIds(dataframe, rowIds, function=['sum']):
      return __class__.normaliseColumnIds(dataframe.T, rowIds, function).T

    def normaliseColumnIds(dataframe, colIds, function=['sum']):
      functionName = function[0]

      if functionName not in __class__.functions['soloComputeFunctions']:
        raise Exception(f'Normalise function \'{functionName}\' not found!')

      normSeries = getattr(dataframe.iloc[:, colIds].apply(pandas.to_numeric, errors='coerce'), functionName)(axis=0)
      dataframe.iloc[:, colIds] = dataframe.iloc[:, colIds].div(normSeries, axis=1)
      return dataframe

    def abs(dataframe):
        return dataframe.abs()

    def applyOnRows(dataframe, applyRowIds, targetRowIds=[], function=['abs'], applyMode='normal', quiet=False):
        """Row counterpart of applyOnColumns (runs on the transpose)."""
        transformed = __class__.applyOnColumns(dataframe.T, applyRowIds, targetRowIds, function, applyMode, quiet)
        return transformed.T

    def applyOnColumns(dataframe, applyColumnIds, targetColumnIds=[], function=['abs'], applyMode='normal', quiet=False):
        if not isinstance(applyColumnIds, list):
            applyColumnIds = [applyColumnIds]
        if not isinstance(targetColumnIds, list):
            targetColumnIds = [targetColumnIds]
        if not isinstance(function, list):
            function = [function]

        functionName = function[0]
        functionParams = [numpy.nan if __class__.considerAsNaN(x) else int(x) if str(x).isdigit() else float(x) if isFloat(x) else x for x in function[1:]]
        indexWarning = False

        if functionName not in [x for y in [__class__.functions[x] for x in __class__.functions if x != 'soloComputeFunctions'] for x in y]:
            raise Exception(f'apply function with name \'{functionName}\' unknown!')

        with pandas.option_context('mode.chained_assignment', None):
            if functionName in __class__.functions['pandasSelfFunctions'] + __class__.functions['specialSelfFunctions'] + list(__class__.transformers.keys()) + (__class__.functions['amidComputeFunctions'] if len(functionParams) > 0 else []):
                applyColumnIds += targetColumnIds

                if functionName in __class__.transformers.keys():
                  dataframe.iloc[:, applyColumnIds] = dataframe.iloc[:, applyColumnIds].applymap(__class__.transformers[functionName])
                if functionName in __class__.functions['pandasSelfFunctions']:
                    if functionName == 'abs':
                      dataframe.iloc[:, applyColumnIds] = dataframe.iloc[:, applyColumnIds].abs().values
                    elif functionName == 'ceil':
                      dataframe.iloc[:, applyColumnIds] = dataframe.iloc[:, applyColumnIds].apply(numpy.ceil)
                    elif functionName == 'floor':
                      dataframe.iloc[:, applyColumnIds] = dataframe.iloc[:, applyColumnIds].apply(numpy.floor)
                    elif functionName == 'round':
                      decimals = 0 if len(functionParams) == 0 or not str(functionParams[0]).isdigit() else int(functionParams[0])
                      if len(functionParams) > 0 and not str(functionParams[0]).isdigit() and not quiet:
                        print('WARNING: round function parameter needs to be an integer', file=sys.stderr)
                      dataframe.iloc[:, applyColumnIds] = dataframe.iloc[:, applyColumnIds].round(decimals).values
                    else:
                      dataframe.iloc[:, applyColumnIds] = getattr(dataframe.iloc[:, applyColumnIds], functionName)(axis=0).values
                elif functionName == 'kmeans':
                  kmeanParams = __class__.parseKMeansParameters(functionParams)
                  kmeans = sklearn.cluster.KMeans(n_clusters=kmeanParams['n_clusters'], init=kmeanParams['init'], n_init=kmeanParams['n_init'], max_iter=kmeanParams['max_iter'], tol=kmeanParams['tol']).fit(dataframe.iloc[:, applyColumnIds].to_numpy())
                  if kmeans.n_iter_ >= kmeanParams['max_iter'] and not quiet:
                    print('WARNING: KMeans algorithm did not reach convergence. Adjust the tol and/or max_iter parameter!', file=sys.stderr)
                  dataframe.iloc[:, applyColumnIds] = [kmeans.cluster_centers_[i] for i in kmeans.labels_]
                elif functionName == 'append':
                    if len(functionParams) == 0:
                      raise Exception('ERROR: no parameters given for function append, skipping')
                    dataframe.iloc[:, applyColumnIds] = (dataframe.iloc[:, applyColumnIds].astype(str) + ''.join([str(x) for x in functionParams])).apply(pandas.to_numeric, errors='ignore')
                elif functionName == 'prepend':
                    if len(functionParams) == 0:
                      raise Exception('ERROR: no parameters given for function prepend, skipping')
                    dataframe.iloc[:, applyColumnIds] = (''.join([str(x) for x in functionParams]) + dataframe.iloc[:, applyColumnIds].astype(str)).apply(pandas.to_numeric, errors='ignore')
                elif functionName == 'set':
                    if len(functionParams) < 1:
                      raise Exception('ERROR: function set requires a parameters')
                    dataframe.iloc[:, applyColumnIds] = functionParams[0]
                elif functionName == 'cset':
                    if len(functionParams) < 3:
                      raise Exception('ERROR: function conditional set requires 3 parameters')
                    for columnId in applyColumnIds:
                      mask = [i for (i, x) in enumerate(__class__.filterFunction(dataframe.iloc[:, columnId], functionParams[1:-1], functionParams[0])) if x]
                      if len(mask) > 0:
                        dataframe.iloc[mask, columnId] = functionParams[-1]
                elif functionName in ['rjust', 'ljust', 'cjust']:
                    padWidth = functionParams[0] if len(functionParams) > 0  else None
                    padChar = str(functionParams[1]) if len(functionParams) > 1 else ' '
                    padFunc = functionName if functionName != 'cjust' else 'center'
                    dataframe.iloc[:, applyColumnIds] = dataframe.iloc[:, applyColumnIds].astype(str).apply(
                      lambda s: s.apply(lambda v: getattr(v, padFunc)(max(s.str.len()) if padWidth is None else padWidth, padChar))
                    )
                elif functionName == 'polyfit':
                    try:
                        fitDim = 1 if len(functionParams) == 0 else int(functionParams[0])
                    except Exception:
                        raise Exception('ERROR: apply parameter needs to be an integer for polyfit')
                    for columnIdx in applyColumnIds:
                        fitTarget = dataframe.iloc[:, columnIdx].apply(pandas.to_numeric, errors='coerce')
                        if not quiet and fitTarget.isna().values.any():
                            print('WARNING: NaN values are replaced with zero for polynomial fitting!', file=sys.stderr)
                        fitTarget = fitTarget.fillna(0).to_list()
                        fitAlong = [float(x) if isFloat(x) else x for x in dataframe.index.to_list()]
                        if not all([isFloat(x) for x in fitAlong]):
                            if not quiet and not indexWarning:
                                indexWarning = True
                                print('WARNING: index is not numeric, will fit along a static number series!', file=sys.stderr)
                            fitAlong = list(range(len(fitTarget)))
                        dataframe.iloc[:, columnIdx] = numpy.polyval(numpy.polyfit(fitAlong, fitTarget, fitDim), fitAlong)
                elif functionName in ['min', 'max']:
                    if any(not isFloat(x) for x in functionParams) and not quiet:
                        print(f'WARNING: applying function {functionName} not compatible with non-float arguments!', file=sys.stderr)
                    param = getattr(numpy, functionName)([x for x in functionParams if isFloat(x)])
                    dataframe.iloc[:, applyColumnIds] = dataframe.iloc[:, applyColumnIds].apply(pandas.to_numeric, errors='coerce').applymap(lambda x: getattr(numpy, functionName)([x, param]), 'ignore')
                else:
                    if any(not isFloat(x) for x in functionParams) and not quiet:
                        print(f'WARNING: applying function {functionName} not compatible with non-float arguments!', file=sys.stderr)
                    for param in [x for x in functionParams if isFloat(x)]:
                        dataframe.iloc[:, applyColumnIds] = getattr(dataframe.iloc[:, applyColumnIds].apply(pandas.to_numeric, errors='coerce'), functionName)(param)
            else:
                # All other functions are computations between applyColumnIds and targetColumnIds
                if len(targetColumnIds) == 0:
                    # If targetColumnIds is empty, fill it with all columns not in applyColumnIds
                    targetColumnIds = list(range(dataframe.shape[1]))
                    targetColumnIds = [r for r in targetColumnIds if r not in applyColumnIds]

                if len(targetColumnIds) > 0:
                    if applyMode == 'reverse':
                        targetColumnIds, applyColumnIds = applyColumnIds, targetColumnIds

                    if functionName == 'concat':
                        for targetColumnIdx in targetColumnIds:
                            for applyColumnIdx in applyColumnIds:
                                dataframe.iloc[:, targetColumnIdx] = dataframe.iloc[:, targetColumnIdx].astype(str).str.cat(dataframe.iloc[:, applyColumnIdx].astype(str), join='right', sep=str(functionParams[0]) if len(functionParams) > 0 else '').apply(pandas.to_numeric, errors='ignore')
                    elif functionName == 'copy':
                        if len(applyColumnIds) > 0 and not quiet:
                            print('WARNING: only the last column/row has an effect with apply function copy!', file=sys.stderr)
                        dataframe.iloc[:, targetColumnIds] = dataframe.iloc[:, targetColumnIds].apply(lambda _: dataframe.iloc[:, applyColumnIds[-1]], axis=0).values
                    elif functionName in ['min', 'max']:
                        applyColumn = getattr(dataframe.iloc[:, applyColumnIds].apply(pandas.to_numeric, errors='coerce'), functionName)(axis=1)
                        for targetColumn in targetColumnIds:
                          dataframe.iloc[:, targetColumn] = getattr(pandas.concat([applyColumn, dataframe.iloc[:, targetColumn].apply(pandas.to_numeric, errors='coerce')], axis=1), functionName)(axis=1)
                    else:
                        applyColumns = [dataframe.iloc[:, columnIdx].apply(pandas.to_numeric, errors='coerce') for columnIdx in applyColumnIds]
                        for applyColumn in applyColumns:
                            dataframe.iloc[:, targetColumnIds] = getattr(dataframe.iloc[:, targetColumnIds].apply(pandas.to_numeric, errors="coerce"), functionName)(applyColumn, axis=0).values
        return dataframe

    def addRow(dataframe, name, function=['mean'], where='back'):
        """Append a computed row to the frame (front or back), delegating to addColumn on the transpose."""
        transposed = __class__.addColumn(dataframe.T, name, function, where)
        return transposed.T

    def addColumn(dataframe, name, function=['mean'], where='back'):
        """Add a new column named *name* computed over the frame's rows.

        function is a [name, *params] list: 'set' fills the column with a
        constant (first parameter), any other name is applied as a row-wise
        pandas aggregation (e.g. 'mean', 'sum'). where selects front/back.
        """
        functionName = function[0]

        def _coerceParam(value):
            # Same precedence as the parser elsewhere: NaN marker first, then
            # integer-looking strings, then anything float-convertible, else raw.
            if __class__.considerAsNaN(value):
                return numpy.nan
            if str(value).isdigit():
                return int(value)
            if isFloat(value):
                return float(value)
            return value

        functionParams = [_coerceParam(x) for x in function[1:]]

        if functionName == 'set':
            if len(functionParams) == 0:
                raise Exception('--add-function set requires one parameter as set constant')
            newCol = pandas.Series(data=[functionParams[0]] * dataframe.shape[0], name=name, index=dataframe.index)
        else:
            numeric = dataframe.apply(pandas.to_numeric, errors='coerce')
            newCol = getattr(numeric, functionName)(axis=1)
            newCol.name = name

        if where == 'back':
            return pandas.concat([dataframe, newCol], axis=1)
        return pandas.concat([newCol, dataframe], axis=1)

    def groupNColumns(dataframe, n, function=['sum'], quiet=False):
        """Aggregate every *n* consecutive columns into one, delegating to groupNRows on the transpose.

        BUG FIX: the call previously passed `quiet` in the `function` positional
        slot and dropped the grouping function entirely, which made groupNRows
        evaluate `function[0]` on a bool and raise TypeError.
        """
        return __class__.groupNRows(dataframe.T, n, function, quiet).T

    def groupNRows(dataframe, n, function=['sum'], quiet=False):
        """Aggregate every *n* consecutive rows into one using *function*.

        function is a [name, *params] list whose first entry is a pandas
        groupby aggregation name ('sum', 'mean', ...). If the index is fully
        numeric it is reset into the data and re-derived after grouping;
        otherwise every n-th original label is kept.
        """
        functionName = function[0]
        treatIndexAsFloat = False
        indexName = dataframe.index.name
        index = None
        if functionName == 'kmeans':
            raise Exception('grouping n rows/columns is incompatible with the kmeans function')
        if all(isFloat(x) or x is numpy.nan for x in map(__class__.transformers['float'], dataframe.index)):
            treatIndexAsFloat = True
            dataframe = __class__.resetIndex(dataframe)
        else:
            # BUG FIX: this warning previously ignored the quiet parameter.
            if not quiet:
                print('WARNING: grouping n rows/columns with a non-numerical index will use a interpolated version', file=sys.stderr)
            index = dataframe.index
        # Bucket rows into consecutive groups of n positions and aggregate each bucket.
        dataframe = getattr(dataframe.groupby(numpy.arange(len(dataframe.index)) // n, axis=0), functionName)()
        if treatIndexAsFloat:
            dataframe = __class__.setIndexColumnByIdx(dataframe, 0, True)
        else:
            # Non-numeric index: keep every n-th original label for the grouped rows.
            dataframe.index = index[::n]
        dataframe.index.name = indexName
        return dataframe

    def groupAllColumns(dataframe, columnName, function=['sum']):
        """Collapse all columns into a single one named *columnName*, via groupAllRows on the transpose."""
        transposed = __class__.groupAllRows(dataframe.T, columnName, function)
        return transposed.T

    def groupAllRows(dataframe, rowName, function=['sum']):
        """Collapse every row into a single row named *rowName* using the given aggregation.

        function is a [name, *params] list; only the name is used here and it
        must be a pandas groupby aggregation ('kmeans' is rejected).
        """
        functionName = function[0]
        if functionName == 'kmeans':
            raise Exception('grouping to a single row/column is incompatible with the kmeans function')
        numeric = dataframe.apply(pandas.to_numeric, errors='coerce')
        grouped = numeric.groupby(lambda _: rowName)
        return getattr(grouped, functionName)()

    def groupByRowIds(dataframe, rowIds, function=['sum'], specialColumnPrefix='_'):
        """Group columns by the values of the given rows, via groupByColumnIds on the transpose."""
        if not isinstance(rowIds, list):
            rowIds = [rowIds]
        # In the transposed frame the special 'columns' axis name becomes 'index'.
        translated = ['index' if rowId == 'columns' else rowId for rowId in rowIds]
        return __class__.groupByColumnIds(dataframe.T, translated, function, specialColumnPrefix).T

    def groupByColumnIds(dataframe, columnIds, function=['sum'], specialColumnPrefix='_', quiet=False):
        """Group the frame's rows by the values of the given columns.

        function is a [name, *params] list; the special name 'kmeans' clusters
        the selected columns with sklearn KMeans instead of a pandas groupby.
        quiet suppresses the KMeans non-convergence warning.

        BUG FIX: `quiet` was referenced in the kmeans branch without being a
        parameter of this function, raising NameError unless a global of that
        name happened to exist; it is now an explicit (backward-compatible)
        keyword parameter.
        """
        functionName = function[0]
        functionParams = function[1:]
        columnIds = columnIds if isinstance(columnIds, list) else [columnIds]

        if functionName == 'kmeans':
            kmeanParams = __class__.parseKMeansParameters(functionParams)
            kmeans = sklearn.cluster.KMeans(n_clusters=kmeanParams['n_clusters'], init=kmeanParams['init'], n_init=kmeanParams['n_init'], max_iter=kmeanParams['max_iter'], tol=kmeanParams['tol']).fit(dataframe.iloc[:, columnIds])
            if kmeans.n_iter_ >= kmeanParams['max_iter'] and not quiet:
                print('WARNING: KMeans algorithm did not reach convergence. Adjust the tol and/or max_iter parameter!', file=sys.stderr)
            dataframe = pandas.DataFrame(kmeans.cluster_centers_, columns=kmeans.feature_names_in_)
            if kmeanParams['samples']:
                # Prepend a special column holding the member count of each cluster.
                dataframe = pandas.concat([pandas.Series(numpy.unique(kmeans.labels_, return_counts=True)[1], name=specialColumnPrefix + 'samples'), dataframe], axis=1)
            dataframe.index.name = 'cluster'
            return __class__.transformColumnIds(dataframe, ['index'], __class__.transformers['index'])
        else:
            # De-duplicate the grouping columns while preserving their order.
            seen = set()
            columnIds = [x for x in columnIds if x not in seen and not seen.add(x)]
            # Grouping columns first, the remaining columns after, in original order.
            newColumnLabels = [dataframe.columns[x] for x in columnIds] + [dataframe.columns[x] for x in range(dataframe.shape[1]) if x not in columnIds]
            dataframe.columns = list(range(dataframe.shape[1]))
            dataframe = getattr(dataframe.convert_dtypes().groupby(by=columnIds, as_index=False, sort=False), functionName)()
            if len(dataframe.columns) != len(newColumnLabels):
                raise Exception('Grouping dropped "nuisances" columns, and is considered not successful. This is probably due to incompatible data types in columns.')
            dataframe.columns = newColumnLabels
            return __class__.transformColumnIds(dataframe.set_index(dataframe.iloc[:, 0]).iloc[:, 1:], ['index'], __class__.transformers['index'])

    def stackRowId(dataframe, rowId):
      # Stack the frame along one row: the values of the selected row (or the
      # column labels themselves when rowId == 'columns') are folded into the
      # index, turning a wide frame into a long one.
      if rowId == 'columns':
        l0Name = dataframe.index.name  # preserved and restored below
        # Append the first row as a second column-index level, then stack
        # level 0 (the original column labels) into the row index.
        dataframe = dataframe.T.set_index(dataframe.T.iloc[:, 0], append=True).T
        dataframe = dataframe.iloc[1:, :].stack(0)
        # stack() can return a Series for single-column results; normalise.
        dataframe = dataframe.to_frame() if not isinstance(dataframe, pandas.DataFrame) else dataframe
        dataframe = dataframe.reset_index()
        # First column of the reset frame becomes the index again.
        dataframe = dataframe.set_index(dataframe.iloc[:, 0]).iloc[:, 1:]
        # The stacked column labels surface as a regular column named 'columns'.
        dataframe.columns = ['columns'] + dataframe.columns.tolist()[1:]
        dataframe.index.name = l0Name
      else:
        l0Name = dataframe.index.name  # preserved and restored below
        # Append the selected row as a second column-index level, then stack it.
        # NOTE(review): iloc[1:, :] always drops the FIRST row, which looks like
        # it assumes rowId == 0 — confirm behaviour for other row indices.
        dataframe = dataframe.T.set_index(dataframe.T.iloc[:, rowId], append=True).T
        dataframe = dataframe.iloc[1:, :].stack(1)
        dataframe = dataframe.to_frame() if not isinstance(dataframe, pandas.DataFrame) else dataframe
        dataframe = dataframe.reset_index()
        dataframe = dataframe.set_index(dataframe.iloc[:, 0]).iloc[:, 1:]
        dataframe.index.name = l0Name
      # Re-apply the standard float/index transformations so the reshaped frame
      # matches the representation used elsewhere in the pipeline.
      return __class__.transformColumnIds(__class__.transformRowIds(__class__.transformColumnIds(dataframe, list(range(dataframe.shape[1])), __class__.transformers['float']), ['columns'], __class__.transformers['index']), ['index'], __class__.transformers['index'])

    def stackColumnId(dataframe, columnId):
        """Stack one column (or the index) by delegating to stackRowId on the transpose."""
        rowId = 'columns' if columnId == 'index' else columnId
        return __class__.stackRowId(dataframe.T, rowId).T

    def unstackRowId(dataframe, rowId):
        """Unstack one row (or the column labels) by delegating to unstackColumnId on the transpose."""
        columnId = 'index' if rowId == 'columns' else rowId
        return __class__.unstackColumnId(dataframe.T, columnId).T

    def unstackColumnId(dataframe, columnId):
      # Inverse of stacking: pivot the values of one column (or of the index
      # when columnId == 'index') back out into separate columns, turning a
      # long frame into a wide one.
      if columnId == 'index':
        l0Name = dataframe.columns[0]
        # Use the first column as an additional index level and drop it from
        # the data, then unstack the original index (level 0) into columns.
        dataframe = dataframe.set_index(dataframe.iloc[:, 0], append=True).iloc[:, 1:]
        dataframe = dataframe.unstack(0)
        # unstack() can return a Series; normalise to a DataFrame.
        dataframe = dataframe.to_frame() if not isinstance(dataframe, pandas.DataFrame) else dataframe
        # Flatten the resulting MultiIndex via a transposed reset/set_index round trip.
        dataframe = dataframe.T.reset_index()
        dataframe = dataframe.set_index(dataframe.iloc[:, 0]).iloc[:, 1:].T
        dataframe.index.name = l0Name
        dataframe.columns.name = None
      else:
        l0Name = dataframe.index.name
        # Move the selected column into the index, remove it from the data,
        # then unstack that level (1) into columns.
        dataframe = __class__.dropColumnsByIds(dataframe.set_index(dataframe.iloc[:, columnId], append=True), [columnId])
        dataframe = dataframe.unstack(1)
        dataframe = dataframe.to_frame() if not isinstance(dataframe, pandas.DataFrame) else dataframe
        dataframe = dataframe.T.reset_index()
        dataframe = dataframe.set_index(dataframe.iloc[:, 0]).iloc[:, 1:].T
        dataframe.index.name = l0Name
        dataframe.columns.name = None
      # Re-apply the standard float/index transformations so the reshaped frame
      # matches the representation used elsewhere in the pipeline.
      return __class__.transformColumnIds(__class__.transformRowIds(__class__.transformColumnIds(dataframe, list(range(dataframe.shape[1])), __class__.transformers['float']), ['columns'], __class__.transformers['index']), ['index'], __class__.transformers['index'])

    def joinFrames(dataframes, function='index', how='outer'):
        """Concatenate a list of frames into one.

        function == 'index' joins column-wise on the index (axis 1); any other
        value appends rows (axis 0). how is forwarded to pandas.concat's join.
        """
        axis = 1 if function == 'index' else 0
        joined = None
        for frame in dataframes:
            if joined is None:
                joined = frame
            else:
                joined = pandas.concat([joined, frame], axis=axis, join=how, verify_integrity=False, copy=True)
        return joined

    def splitFramesByRowIdx(dataframes, rowIdx):
        """Split each frame into one frame per distinct value found in the given row.

        rowIdx is an integer row position, or the literal 'columns' to split by
        column label. Splitting by a row groups *columns* that share the same
        value in that row (symmetric to splitFramesByColumnIdx, which groups rows).

        BUG FIX: the integer path used `frame[mask]`, which treats the mask as a
        row selector and tries to align it with the index; since the mask is
        indexed by column labels this raised an alignment/IndexingError for
        typical frames. Columns are now selected with `.loc[:, mask]`.
        """
        newFrames = []
        for frame in dataframes:
            if rowIdx == 'columns':
                for v in frame.columns.unique():
                    columnIds = __class__.getColumnIds(frame, v, 'all', False)
                    newFrames.append(__class__.selectColumnsByIds(frame, columnIds))
            else:
                rowIdx = int(rowIdx)
                row = frame.iloc[rowIdx, :]
                for v in row.unique():
                    if v != v:
                        # NaN never equals itself: collect NaN columns via isna().
                        newFrames.append(frame.loc[:, row.isna()])
                    else:
                        newFrames.append(frame.loc[:, row == v])
        return newFrames

    def splitFramesByColumnIdx(dataframes, columnIdx):
        """Split each frame into one frame per distinct value found in the given column.

        columnIdx is an integer column position, or the literal 'index' to split
        by index label.
        """
        result = []
        for frame in dataframes:
            if columnIdx == 'index':
                # One frame per distinct index label.
                for label in frame.index.unique():
                    rowIds = __class__.getRowIds(frame, label, 'all', False)
                    result.append(__class__.selectRowsByIds(frame, rowIds))
                continue
            columnIdx = int(columnIdx)
            column = frame.iloc[:, columnIdx]
            for value in column.unique():
                # NaN never equals itself: collect NaN rows via isna().
                mask = column.isna() if value != value else (column == value)
                result.append(frame[mask])
        return result

    def printFrames(filenames, dataframe, frameIndex, frameCount, precision=None):
        """Pretty-print one frame to stdout with a header naming its source files.

        precision, when given, fixes the number of decimals for float display.
        """
        filenames = filenames if isinstance(filenames, list) else [filenames]
        terminal = shutil.get_terminal_size((80, 40))
        separatorLine = '---'
        fileHeader = None
        if filenames:
            fileHeader = f"File: {', '.join(filenames)}"
            # Separator matches the header width, capped at the terminal width.
            separatorLine = '-' * min(terminal.columns, len(fileHeader))
        print(separatorLine + f'\nFrame: {dataframe.index.name} ({frameIndex+1}/{frameCount})')
        if fileHeader is not None:
            print(textwrap.fill(fileHeader, width=terminal.columns, subsequent_indent=' '))
        print(separatorLine)
        floatFormat = None if precision is None else f'{{:.{precision}f}}'.format
        # NOTE: 'display.max_columns' appears twice in the original option list;
        # kept as-is for exact behavioural parity.
        with pandas.option_context('display.max_rows', None, 'display.max_columns', None, 'display.width', terminal.columns, 'display.max_columns', None, 'display.float_format', floatFormat):
            print(dataframe)
        print(separatorLine)

    def framesToCSV(dataframes, filenames=['stdout'], separator=None, quiet=False, threads=0, precision=None):
        """Write each dataframe as CSV to its corresponding filename.

        'stdout'/'stderr' write to the standard streams; any other name is
        opened with xopen (supports compression, `threads` decompression
        threads). A '.tsv'/'.csv' extension overrides the separator. When
        there are fewer filenames than frames, the last filename is reused
        and subsequent frames are appended to it.

        BUG FIXES: the close condition compared against sys.stdout twice,
        closing sys.stderr when it was the target; the per-frame message had
        lost its filename placeholder.
        """
        created = []
        if len(filenames) < len(dataframes):
            # Reuse the last filename for the remaining frames (appended below).
            filenames = filenames + ([filenames[-1]] * (len(dataframes) - len(filenames)))
        for _index, (frame, filename) in enumerate(zip(dataframes, filenames)):
            sep = ';' if separator is None else separator
            if filename.endswith('.tsv'):
                sep = '\t'
            elif filename.endswith('.csv'):
                sep = ';'
            # First write to a file truncates, later writes append.
            fFile = sys.stdout if filename == 'stdout' else sys.stderr if filename == 'stderr' else xopen.xopen(filename, 'w' if filename not in created else 'a', threads=threads)
            created.append(filename)
            frame.to_csv(fFile, sep=sep, na_rep='NaN', float_format=None if precision is None else f'%.{precision}f')
            if fFile is not sys.stdout and fFile is not sys.stderr:
                fFile.close()
            if not quiet and not fFile == sys.stdout:
                print(f'Frame {_index + 1}/{len(dataframes)} saved to {filename}')

    def framesToPickle(dataframes, filename, quiet=False, threads=0):
        """Pickle the list of dataframes to a file or a standard stream.

        'stdout'/'stderr' write to the binary buffer of the respective stream;
        any other name is opened with xopen in binary mode (`threads`
        decompression threads).

        BUG FIXES: the close condition compared against sys.stdout.buffer twice,
        closing sys.stderr's buffer when it was the target; the saved-to message
        had lost its filename placeholder.
        """
        fFile = sys.stdout.buffer if filename == 'stdout' else sys.stderr.buffer if filename == 'stderr' else xopen.xopen(filename, 'wb', threads=threads)
        pickle.dump(dataframes, fFile, pickle.HIGHEST_PROTOCOL)
        if not quiet and not fFile == sys.stdout.buffer:
            print(f'Dataframes saved to {filename}')
        if fFile is not sys.stdout.buffer and fFile is not sys.stderr.buffer:
            fFile.close()



# Special column names recognised per trace (matched together with the
# configurable prefix, see --special-column-prefix below).
traceSpecialColumns = ['error', 'error-', 'error+', 'offset', 'label', 'colour', 'size']
# Special column names that apply to a whole frame rather than a single trace.
frameSpecialColumns = ['category']
# Union of both sets, used e.g. in the --special-column-prefix help text.
allSpecialColumns = traceSpecialColumns + frameSpecialColumns

parser = argparse.ArgumentParser(description="Visualize your data the easy way")
# Global Arguments

parserFileOptions = parser.add_argument_group('file parsing options')

inputFileArgument = parser.add_argument('-i', '--input', type=str, help="input file to parse", nargs="+", action=ParentAction, required=True)
# Per File Parsing Arguments
parserFileOptions.add_argument("--special-column-prefix", help=f"special column prefix ({','.join(allSpecialColumns)}) starting with (default %(default)s)", type=str, default='_', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--comment", help="ignores lines starting with (default %(default)s)", type=str, default='#', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--delimiter", help="data delimiter (auto detected by default)", type=str, default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--no-columns", help="do not use a column row", default=False, sticky_default=True, nargs=0, sub_action="store_true", action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--no-index", help="do not use a index column", default=False, sticky_default=True, nargs=0, sub_action="store_true", action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--threads", help="number of threads to decompress files", default=multiprocessing.cpu_count(), type=int, choices=Range(0, None), action=ChildAction, parent=inputFileArgument)


parserFileOptions.add_argument("--index-icolumn", help="set index column by index", type=int, sticky_default=True, choices=Range(None, None), default=None, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--index-column", help="set index column by name", default=None, type=str, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--columns-irow", help="set columns row by index", type=int, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--columns-row", help="set columns row by name", default=None, type=str, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--reset-index", help="reset index back as first column (will be reinterpreted as float/nan data if possible)", default=False, nargs=0, sub_action="store_true", sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--drop-index", help="drops index", default=False, nargs=0, sub_action="store_true", sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--reset-columns", help="reset columns back as first row (will be reinterpret as float/nan data if possible)", default=False, nargs=0, sub_action="store_true", sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--drop-columns", help="drop columns", default=False, nargs=0, sub_action="store_true", sticky_default=True, action=ChildAction, parent=inputFileArgument)

parserFileOptions.add_argument("--select-mode", help="select row/columns policy (default %(default)s)", type=str, default='all', choices=['all', 'first', 'last'], action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--ignore-icolumns", help="ignore column by indexes (or slices)", type=SliceType(), default=[], sticky_default=True, nargs='+', action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--ignore-columns", help="ignore columns by names", type=str, default=[], sticky_default=True, nargs='+', action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--ignore-irows", help="ignore row by indexes (or slices)", type=SliceType(), default=[], sticky_default=True, nargs='+', action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--ignore-rows", help="ignore rows by name", type=str, default=[], sticky_default=True, nargs='+', action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--ignore-iframes", help="ignore frame by indexes (or slices)", type=SliceType(), default=[], sticky_default=True, nargs='+', action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--ignore-frames", help="ignore frames by names", type=str, default=[], sticky_default=True, nargs='+', action=ChildAction, parent=inputFileArgument)

parserFileOptions.add_argument("--select-irows", help="select row by indexes (or slices)", type=SliceType(), default=[], sticky_default=True, nargs='+', action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--select-rows", help="select rows by names", type=str, default=[], sticky_default=True, nargs='+', action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--select-icolumns", help="select column by indexes (or slices)", type=SliceType(), default=[], sticky_default=True, nargs='+', action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--select-columns", help="select columns by names", type=str, default=[], sticky_default=True, nargs='+', action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--select-iframes", help="select frame by indexes (or slices)", type=SliceType(), default=[], sticky_default=True, nargs='+', action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--select-frames", help="select frames by name", type=str, default=[], sticky_default=True, nargs='+', action=ChildAction, parent=inputFileArgument)

parserFileOptions.add_argument("--filter-irows", help=f'filter columns by the content of the row index (or slice) (e.g. --filter-irows 0 >= 3), available conditions: {{{",".join(DataframeActions.conditions)}}}', type=str, default=[], sticky_default=True, nargs='+', action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--filter-row", help=f'filter columns by the content of the row name (e.g. --filter-row type in alu fp br), available conditions: {{{",".join(DataframeActions.conditions)}}}', type=str, default=[], sticky_default=True, nargs='+', action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--filter-icolumns", help=f'filter rows by the content of the column index (or slice) (e.g. --filter-icolumn 1 xor 0x9 0xc), available conditions: {{{",".join(DataframeActions.conditions)}}}', type=str, default=[], sticky_default=True, nargs='+', action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--filter-column", help=f'filter rows by the content of the column name (e.g. --filter-column time > 10), available conditions: {{{",".join(DataframeActions.conditions)}}}', type=str, default=[], sticky_default=True, nargs='+', action=ChildAction, parent=inputFileArgument)

parserFileOptions.add_argument("--sort-order", help="sets sort order for sort operations (default %(default)s)", default='asc', choices=['asc', 'desc'], action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--sort-function", help="use a sort function when sorting across multiple rows or columns (default 'none'), none will sort the rows or columns in given order", default=['none'], nargs='+', choices=['none'] + DataframeActions.functions['soloComputeFunctions'], action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--sort-rows", help="sort all rows", default=False, sub_action="store_true", nargs=0, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--sort-columns", help="sort all columns", default=False, sub_action="store_true", nargs=0, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--sort-by-irows", help="sort column after rows by indexes (or slices)", type=SliceType({'columns': 'columns', **defaultSliceTypeTranslator}), default=[], nargs='+', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--sort-by-rows", help="sort column after rows names", type=str, default=[], nargs='+', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--sort-by-icolumns", help="sort rows after columns by indexes (or slices)", type=SliceType({'index': 'index', **defaultSliceTypeTranslator}), default=[], nargs='+', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--sort-by-columns", help="sort rows after columns by names", type=str, default=[], nargs='+', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--reverse-columns", help="reverse column order", default=False, sub_action="store_true", nargs=0, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--reverse-rows", help="reverse row order", default=False, sub_action="store_true", nargs=0, sticky_default=True, action=ChildAction, parent=inputFileArgument)

parserFileOptions.add_argument("--data-scale", help="scales all data by this value (equivalent to --apply-function mul <x> --apply-icolumns :)", type=float, default=1, choices=Range(None, None), sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--data-offset", help="offsets all data by this value (equivalent to --apply-function add <x> --apply-icolumns :)", type=float, default=0, choices=Range(None, None), sticky_default=True, action=ChildAction, parent=inputFileArgument)

parserFileOptions.add_argument("--normalise-function", help=f"normalise function (default '{DataframeActions.functions['soloComputeFunctions'][0]}')", type=str, default=[DataframeActions.functions['soloComputeFunctions'][0]], nargs='+', choices=DataframeActions.functions['soloComputeFunctions'], sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--normalise-icolumns", help="normalise these columns by index (or slice) with selected normalise function", type=SliceType(), default=[], nargs='+', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--normalise-columns", help="normalise these columns with selected normalise function", type=str, default=[], nargs='+', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--normalise-irows", help="normalise these rows by column index (or slice) with selected normalise function", type=SliceType(), default=[], nargs='+', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--normalise-rows", help="normalise these rows with selected normalise function", type=str, default=[], nargs='+', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--normalise-to", help="normalise data to constant (equivalent to --apply-function div <x> --apply-icolumns :)", type=float, default=0, choices=Range(None, None), sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--normalise-to-icolumn", help="normalise all columns to this column by index (or slice)", type=SliceType(), default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--normalise-to-column", help="normalise all columns to this column by name", type=str, default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--normalise-to-irow", help="normalise all rows to this row by index (or slice)", type=SliceType(), default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--normalise-to-row", help="normalise all rows to this row by name", type=str, default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)


parserFileOptions.add_argument("--add-at", help="add rows or columns at the front or back of the frame", type=str, default='back', choices=['front', 'back'], action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--add-function", help=f"use this function to compute new row or column (default '{DataframeActions.functions['soloComputeFunctions'][0]}')", type=str, default=[DataframeActions.functions['soloComputeFunctions'][0]], nargs='+', action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--add-column", help="add a new column with name", type=str, default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--add-row", help="add a new row with name", type=str, default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)

parserFileOptions.add_argument("--group-function", help=f'function used to group columns or rows (default {DataframeActions.functions["soloComputeFunctions"][0]}), choose from {{{",".join(DataframeActions.functions["soloComputeFunctions"])}}} and for grouping by rows and columns also from {{{",".join(DataframeActions.functions["specialGroupFunctions"])}}}', type=str, default=[DataframeActions.functions['soloComputeFunctions'][0]], nargs='+', action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--group-by-icolumns", help="group by these column indexes (or slice, or frame index)", type=SliceType({'index': 'index', **defaultSliceTypeTranslator}), default=[], nargs='+', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--group-by-columns", help="group by these column names", type=str, default=[], nargs='+', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--group-by-irows", help="group by these row indexes (or slice, or frame columns)", type=SliceType({'columns': 'columns', **defaultSliceTypeTranslator}), default=[], nargs='+', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--group-by-rows", help="group by these row names", type=str, default=[], nargs='+', sticky_default=True, action=ChildAction, parent=inputFileArgument)

parserFileOptions.add_argument("--group-to-column", help="group all columns into a single one", type=str, default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--group-to-row", help="group all rows into a single one", type=str, default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)

parserFileOptions.add_argument("--group-n-columns", help="group n columns together", type=int, choices=Range(2, None), sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--group-n-rows", help="group n rows together", type=int, choices=Range(2, None), sticky_default=True, action=ChildAction, parent=inputFileArgument)

parserFileOptions.add_argument("--stack-icolumn", help="stack this column index (or slice, or frame index)", type=SliceType({'index': 'index', **defaultSliceTypeTranslator}), default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--stack-column", help="stack this column name", type=str, default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--stack-irow", help="stack this row index (or slice, or frame columns)", type=SliceType({'columns': 'columns', **defaultSliceTypeTranslator}), default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--stack-row", help="stack this row name", type=str, default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)

parserFileOptions.add_argument("--unstack-icolumn", help="stack this column index (or slice, or frame index)", type=SliceType({'index': 'index', **defaultSliceTypeTranslator}), default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--unstack-column", help="stack this column name", type=str, default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--unstack-irow", help="stack this row index (or slice, or frame columns)", type=SliceType({'columns': 'columns', **defaultSliceTypeTranslator}), default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--unstack-row", help="stack this row name", type=str, default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)

# Elementwise and cross-row/column transformations applied per input file.
# NOTE(review): --abs has type=str but nargs=0 with sub_action="store_true" — the
# type appears vestigial for a zero-argument flag; confirm against ChildAction.
parserFileOptions.add_argument("--abs", help="convert all values to absolute values", type=str, default=False, nargs=0, sub_action="store_true", sticky_default=True, action=ChildAction, parent=inputFileArgument)
# 'normal' applies the function from the first selected row/column onto the rest;
# 'reverse' goes the other way.
parserFileOptions.add_argument("--apply-mode", help="apply function from first row or column argument on to the rest or in reverse (default %(default)s)", type=str, default='normal', choices=['normal', 'reverse'], action=ChildAction, parent=inputFileArgument)
# Help enumerates the valid function names from DataframeActions at parser-build time.
parserFileOptions.add_argument("--apply-function", help=f'apply this function onto rows or columns or between rows or columns. Only applied on row/column itself -- {{{",".join(DataframeActions.functions["pandasSelfFunctions"] + DataframeActions.functions["specialSelfFunctions"])}}}, applied with parameter on row/column itself or between rows/columns without parameter -- {{{",".join(DataframeActions.functions["amidComputeFunctions"])}}}, only applied between rows/columns -- {{{",".join(DataframeActions.functions["onlyAmidComputeFunctions"])}}}. E.g. --apply-function add 2 --apply-icolumns all (adds 2 to all columns), --apply-function add --apply-icolumns :5 5: (adds the first 5 columns to all columns after the 5th), --apply-function cset = nan 0 --apply-icolumns : (conditional set, sets all NaN values to 0), --apply-function polyfit 2 --apply-icolumn 0 (tries to do a order 2 polynomial fit of the first column), --apply-function concat " -> " --apply-icolumn 0 1 (concatenates the first onto the second column with " -> " as separator), ...', type=str, nargs='+', default=['abs'], action=ChildAction, parent=inputFileArgument)
# Targets for --apply-function: by index/slice or by name, on columns or rows.
parserFileOptions.add_argument("--apply-icolumns", help="apply function on or between columns by indexes (or slices)", type=SliceType({'index': 'index', **defaultSliceTypeTranslator}), default=[], nargs='+', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--apply-columns", help="apply function on or between columns by name", type=str, default=[], nargs='+', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--apply-irows", help="apply function on or between rows by indexes (or slices)", type=SliceType({'columns' : 'columns', **defaultSliceTypeTranslator}), default=[], nargs='+', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--apply-rows", help="apply function on or between rows by name", type=str, default=[], nargs='+', sticky_default=True, action=ChildAction, parent=inputFileArgument)

# Renaming of columns/rows/frames plus simple frame-wide cleanup operations.
parserFileOptions.add_argument("--column-names", help="rename columns, start from the first column (e.g. --column-names first second third)", type=str, sticky_default=True, default=[], nargs='+', action=ChildAction, parent=inputFileArgument)
# FIX: example previously read "--icolumn names" (missing hyphen in the option name).
parserFileOptions.add_argument("--icolumn-names", help="rename specific columns selected by index or slice (e.g. --icolumn-names 3: third fourth fifth)", type=str, sticky_default=True, default=[], nargs='+', action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--row-names", help="rename rows, start from the first (e.g. --row-names first second third)", type=str, sticky_default=True, default=[], nargs='+', action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--irow-names", help="rename specific rows selected by index or slice (e.g. --irow-names 3::2 third fifth seventh)", type=str, sticky_default=True, default=[], nargs='+', action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--frame-names", help="rename frames, start from the first (e.g. --frame-names first second third)", type=str, sticky_default=True, default=[], nargs='+', action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--iframe-names", help="rename specific frames selected by index or slice (--iframe-names -1 last)", type=str, sticky_default=True, default=[], nargs='+', action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--drop-nan", help="drop rows/columns that only contain NaN values", sticky_default=True, default=False, nargs=0, sub_action="store_true", action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--drop-any-nan", help="drop rows/columns that contain NaN values", sticky_default=True, default=False, nargs=0, sub_action="store_true", action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--transpose", help="transpose rows and columns", default=False, sticky_default=True, nargs=0, sub_action="store_true", action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--print", help="print out all active frames", default=False, nargs=0, sub_action="store_true", action=ChildAction, parent=inputFileArgument)

# Combine frames (join) or break a frame apart along a chosen column/row (split).
parserFileOptions.add_argument("--join", help="joins dataframes on columns or index by default with an outer join (e.g. --join index, --join columns inner)", default=['index', 'outer'], choices=['index', 'columns', 'outer', 'inner'], nargs='+', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--split-icolumn", help="split frames into multiple frames along this column by index (or slice, or frame index)", type=str, sticky_default=True, choices=Range(None, None, ['index']), default=None, action=ChildAction, parent=inputFileArgument)
# FIX: typo "mutiple" -> "multiple" in the help text.
parserFileOptions.add_argument("--split-column", help="split frames into multiple frames along this column by name", type=str, sticky_default=True, default=None, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--split-irow", help="split frames into multiple frames along this row by index (or slice, or frame columns)", type=str, sticky_default=True, choices=Range(None, None, ['columns']), default=None, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--split-row", help="split frames into multiple frames along this row by name", type=str, sticky_default=True, default=None, action=ChildAction, parent=inputFileArgument)

# Select which frames subsequent per-file options operate on.
parserFileOptions.add_argument("--focus-iframes", help="set the focus on these frames by index (or slice)", type=SliceType(), default=[], nargs='+', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--focus-frames", help="set the focus on these frames by name", type=str, default=[], nargs='+', sticky_default=True, action=ChildAction, parent=inputFileArgument)
# FIX: typo "remote" -> "remove" in the help text.
parserFileOptions.add_argument("--defocus-iframes", help="remove the focus from these frames by index (or slice)", type=SliceType(), default=[], nargs='+', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--defocus-frames", help="remove the focus from these frames by name", type=str, default=[], nargs='+', sticky_default=True, action=ChildAction, parent=inputFileArgument)

# Frame export: plain text (one file per frame) or a single pickle of all frames;
# 'stdout' routes the output to standard output instead of a file.
parserFileOptions.add_argument("--print-precision", help="set output precision for text and console output (default %(default)s)", type=str, default='default', choices=Range(0, None, ['default']), action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--file", help="save frames to files (one file per frame, stdout for standard output)", default=None, type=str, nargs='+', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserFileOptions.add_argument("--pickle", help="save pickled frames to files (one file containing all frames, stdout for standard output)", default=None, type=str, sticky_default=True, action=ChildAction, parent=inputFileArgument)

# Per File Plotting Arguments:
# Plot type selection and subplot grid placement (row/col with spans).
parserPlotOptions = parser.add_argument_group('plot options')
parserPlotOptions.add_argument('--plot', choices=['line', 'bar', 'box', 'violin', 'heatmap', 'dendrogram'], help='plot type', default='line', action=ChildAction, parent=inputFileArgument)
parserPlotOptions.add_argument("--orientation", help="set plot orientation", default='auto', choices=['vertical', 'v', 'horizontal', 'h', 'auto'], action=ChildAction, parent=inputFileArgument)
parserPlotOptions.add_argument("--title", help="subplot title", type=escapedStr, default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserPlotOptions.add_argument("--use-name", help="use name for traces", type=escapedStr, default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
# Subplot placement is 1-based (Range(1, None)).
parserPlotOptions.add_argument('--row', type=int, choices=Range(1, None), help='subplot row (default %(default)s)', default=1, action=ChildAction, parent=inputFileArgument)
parserPlotOptions.add_argument('--rowspan', type=int, choices=Range(1, None), help='subplot rowspan (default %(default)s)', default=1, action=ChildAction, parent=inputFileArgument)
parserPlotOptions.add_argument('--col', type=int, choices=Range(1, None), help='subplot column (default %(default)s)', default=1, action=ChildAction, parent=inputFileArgument)
parserPlotOptions.add_argument('--colspan', type=int, choices=Range(1, None), help='subplot columnspan (default %(default)s)', default=1, action=ChildAction, parent=inputFileArgument)
parserPlotOptions.add_argument("--text-template", help="set text template for bar and line plots (default %(default)s)", type=escapedStr, default='', action=ChildAction, parent=inputFileArgument)

# Trace-level appearance and data-interpretation options.
parserPlotOptions.add_argument("--category", help="how are category axis shown", default='normal', choices=['none', 'normal', 'reversed'], action=ChildAction, parent=inputFileArgument)
parserPlotOptions.add_argument("--error", help="show error markers in plot (need to be supplied by data)", default='hide', choices=['show', 'hide'], action=ChildAction, parent=inputFileArgument)
parserPlotOptions.add_argument("--trace-names", help="set individual trace names", default=[], sticky_default=True, type=escapedStr, nargs='+', action=ChildAction, parent=inputFileArgument)
parserPlotOptions.add_argument("--trace-colours", help="define explicit trace colours", default=[], nargs='+', type=escapedStr, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserPlotOptions.add_argument("--line-width", help="set line width (default %(default)s)", type=int, default=1, choices=Range(0,), action=ChildAction, parent=inputFileArgument)
# FIX: removed a stray double space in the help text below.
parserPlotOptions.add_argument("--line-colour", help="set line colour (default %(default)s) (line charts are using regular colour)", type=escapedStr, default='#222222', action=ChildAction, parent=inputFileArgument)
parserPlotOptions.add_argument("--opacity", help="colour opacity (default 0.8 for overlay modes, else 1.0)", choices=Range(0, 1, ['auto']), action=ChildAction, parent=inputFileArgument)
parserPlotOptions.add_argument("--offsetgroups", help="set explicit offsetgroups for e.g. bar charts", type=int, default='auto', nargs='+', choices=Range(0, None, ['auto']), sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserPlotOptions.add_argument("--legend-entries", help="choose which entries are shown in legend", choices=['all', 'unique', 'none'], default=None, action=ChildAction, parent=inputFileArgument)
# FIX: typo "histogramm" -> "histogram" in the help text.
parserPlotOptions.add_argument("--distribution-mode", help="define whether the input data is parsed as binned or aggregated data (e.g. histogram data) for appropriate plots (box and violin)", type=str, choices=['normal', 'aggregated', 'raggregated'], default='normal', action=ChildAction, parent=inputFileArgument)
parserPlotOptions.add_argument("--distribution-scale", help="scale data points to this number in distribution mode aggregated (default %(default)s)", type=float, choices=Range(0, None), default=0, action=ChildAction, parent=inputFileArgument)

# Line-plot specific options: mode, fill, stacking, shape, and dash style.
parserLinePlotOptions = parser.add_argument_group('line plot options')
parserLinePlotOptions.add_argument("--line-mode", choices=['none', 'lines', 'markers', 'text', 'lines+markers', 'lines+text', 'markers+text', 'lines+markers+text'], help="choose linemode (default %(default)s)", default='lines', action=ChildAction, parent=inputFileArgument)
parserLinePlotOptions.add_argument('--line-fill', choices=['none', 'tozeroy', 'tozerox', 'tonexty', 'tonextx', 'toself', 'tonext'], help='fill line area (default %(default)s)', default='none', action=ChildAction, parent=inputFileArgument)
parserLinePlotOptions.add_argument('--line-stack', help='stack line input traces (default %(default)s)', default=False, sticky_default=True, nargs=0, sub_action="store_true", action=ChildAction, parent=inputFileArgument)
parserLinePlotOptions.add_argument('--line-shape', choices=['linear', 'spline', 'hv', 'vh', 'hvh', 'vhv'], help='choose line shape (default %(default)s)', default='linear', action=ChildAction, parent=inputFileArgument)
parserLinePlotOptions.add_argument('--line-dash', choices=['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot'], help='choose line dash (default %(default)s)', default='solid', action=ChildAction, parent=inputFileArgument)
# The choices list below enumerates every plotly marker symbol name.
parserLinePlotOptions.add_argument('--line-markers', choices=['circle', 'circle-open', 'circle-dot', 'circle-open-dot', 'square', 'square-open', 'square-dot', 'square-open-dot', 'diamond', 'diamond-open', 'diamond-dot', 'diamond-open-dot', 'cross', 'cross-open', 'cross-dot', 'cross-open-dot', 'x', 'x-open', 'x-dot', 'x-open-dot', 'triangle-up', 'triangle-up-open', 'triangle-up-dot', 'triangle-up-open-dot', 'triangle-down', 'triangle-down-open', 'triangle-down-dot', 'triangle-down-open-dot', 'triangle-left', 'triangle-left-open', 'triangle-left-dot', 'triangle-left-open-dot', 'triangle-right', 'triangle-right-open', 'triangle-right-dot', 'triangle-right-open-dot', 'triangle-ne', 'triangle-ne-open', 'triangle-ne-dot', 'triangle-ne-open-dot', 'triangle-se', 'triangle-se-open', 'triangle-se-dot', 'triangle-se-open-dot', 'triangle-sw', 'triangle-sw-open', 'triangle-sw-dot', 'triangle-sw-open-dot', 'triangle-nw', 'triangle-nw-open', 'triangle-nw-dot', 'triangle-nw-open-dot', 'pentagon', 'pentagon-open', 'pentagon-dot', 'pentagon-open-dot', 'hexagon', 'hexagon-open', 'hexagon-dot', 'hexagon-open-dot', 'hexagon2', 'hexagon2-open', 'hexagon2-dot', 'hexagon2-open-dot', 'octagon', 'octagon-open', 'octagon-dot', 'octagon-open-dot', 'star', 'star-open', 'star-dot', 'star-open-dot', 'hexagram', 'hexagram-open', 'hexagram-dot', 'hexagram-open-dot', 'star-triangle-up', 'star-triangle-up-open', 'star-triangle-up-dot', 'star-triangle-up-open-dot', 'star-triangle-down', 'star-triangle-down-open', 'star-triangle-down-dot', 'star-triangle-down-open-dot', 'star-square', 'star-square-open', 'star-square-dot', 'star-square-open-dot', 'star-diamond', 'star-diamond-open', 'star-diamond-dot', 'star-diamond-open-dot', 'diamond-tall', 'diamond-tall-open', 'diamond-tall-dot', 'diamond-tall-open-dot', 'diamond-wide', 'diamond-wide-open', 'diamond-wide-dot', 'diamond-wide-open-dot', 'hourglass', 'hourglass-open', 'bowtie', 'bowtie-open', 'circle-cross', 'circle-cross-open', 'circle-x',
'circle-x-open', 'square-cross', 'square-cross-open', 'square-x', 'square-x-open', 'diamond-cross', 'diamond-cross-open', 'diamond-x', 'diamond-x-open', 'cross-thin', 'cross-thin-open', 'x-thin', 'x-thin-open', 'asterisk', 'asterisk-open', 'hash', 'hash-open', 'hash-dot', 'hash-open-dot', 'y-up', 'y-up-open', 'y-down', 'y-down-open', 'y-left', 'y-left-open', 'y-right', 'y-right-open', 'line-ew', 'line-ew-open', 'line-ns', 'line-ns-open', 'line-ne', 'line-ne-open', 'line-nw', 'line-nw-open', 'arrow-up', 'arrow-up-open', 'arrow-down', 'arrow-down-open', 'arrow-left', 'arrow-left-open', 'arrow-right', 'arrow-right-open', 'arrow-bar-up', 'arrow-bar-up-open', 'arrow-bar-down', 'arrow-bar-down-open', 'arrow-bar-left', 'arrow-bar-left-open', 'arrow-bar-right', 'arrow-bar-right-open', 'arrow', 'arrow-open', 'arrow-wide', 'arrow-wide-open'], help='choose line marker (default circle)', default=[], nargs='+', action=ChildAction, parent=inputFileArgument)
parserLinePlotOptions.add_argument('--line-marker-width', help='choose marker line width (default %(default)s)', type=int, default=1, choices=Range(0,), action=ChildAction, parent=inputFileArgument)
parserLinePlotOptions.add_argument('--line-marker-size', help='choose line marker size (default %(default)s)', type=int, default=6, choices=Range(0, None), action=ChildAction, parent=inputFileArgument)
# FIX: help previously said "size" (copied from --line-marker-size); this option sets opacity.
parserLinePlotOptions.add_argument('--line-marker-opacity', help='choose line marker opacity (default %(default)s)', default='auto', choices=Range(0, 1, ['auto']), action=ChildAction, parent=inputFileArgument)
# FIX: typo "positon" -> "position" in the help text.
parserLinePlotOptions.add_argument("--line-text-position", choices=["top left", "top center", "top right", "middle left", "middle center", "middle right", "bottom left", "bottom center", "bottom right"], help="choose line text position (default %(default)s)", default='middle center', action=ChildAction, parent=inputFileArgument)

# Bar-plot specific options. --bar-mode, --bar-gap and --bar-group-gap are
# figure-level (no ChildAction/parent), the rest are per input file.
parserBarPlotOptions = parser.add_argument_group('bar plot options')
parserBarPlotOptions.add_argument("--bar-mode", help="choose barmode (default %(default)s)", choices=['stack', 'group', 'overlay', 'relative'], default='group')
parserBarPlotOptions.add_argument("--bar-width", help="set explicit bar width", choices=Range(0, None, ['auto']), default='auto', action=ChildAction, parent=inputFileArgument)
parserBarPlotOptions.add_argument("--bar-shift", help="set bar shift", choices=Range(None, None, ['auto']), default='auto', action=ChildAction, parent=inputFileArgument)
parserBarPlotOptions.add_argument("--bar-text-position", help="choose bar text position (default %(default)s)", choices=["inside", "outside", "auto", "none"], default='none', action=ChildAction, parent=inputFileArgument)
parserBarPlotOptions.add_argument("--bar-text-anchor", help="choose bar inside text anchor (default %(default)s)", choices=["end", "middle", "start"], default='start', action=ChildAction, parent=inputFileArgument)
parserBarPlotOptions.add_argument("--bar-text-angle", help="set bar text angle (default %(default)s)", choices=Range(None, None, ['auto']), default='auto', action=ChildAction, parent=inputFileArgument)
# FIX: "$(default)s" was a shell-style typo; argparse only substitutes "%(default)s".
parserBarPlotOptions.add_argument("--bar-gap", help="set bar gap (default %(default)s)", choices=Range(0, 1, ['auto']), default='auto')
parserBarPlotOptions.add_argument("--bar-group-gap", help="set bar group gap (default %(default)s)", choices=Range(0, 1), default=0)

# Violin-plot specific options. The gap options are figure-level (no ChildAction).
parserViolinPlotOptions = parser.add_argument_group('violin plot options')
parserViolinPlotOptions.add_argument("--violin-mode", help="choose violinmode (default %(default)s)", choices=['overlay', 'group', 'halfoverlay', 'halfgroup', 'halfhalf', 'poshalfgroup', 'neghalfgroup', 'poshalf', 'neghalf'], default='overlay', action=ChildAction, parent=inputFileArgument)
parserViolinPlotOptions.add_argument("--violin-mean", help="choose violin mean (default %(default)s)", choices=['none', 'line', 'box'], default='none', action=ChildAction, parent=inputFileArgument)
# FIX: removed dangling "for" in the help text ("set points mode for (default ...)").
parserViolinPlotOptions.add_argument("--violin-points", help="set points mode (default %(default)s)", type=str, default='none', choices=['all', 'outliers', 'suspectedoutliers', 'none'], action=ChildAction, parent=inputFileArgument)
parserViolinPlotOptions.add_argument("--violin-jitter", help="set jitter for violin points (default %(default)s)", type=float, default=0, choices=Range(0, 1), action=ChildAction, parent=inputFileArgument)
parserViolinPlotOptions.add_argument("--violin-width", help="change violin widths (default %(default)s)", type=float, default=0, choices=Range(0,), action=ChildAction, parent=inputFileArgument)
parserViolinPlotOptions.add_argument("--violin-gap", help="gap between violins (default %(default)s) (not compatible with violin-width)", type=float, default=0.3, choices=Range(0, 1))
parserViolinPlotOptions.add_argument("--violin-group-gap", help="gap between violin groups (default %(default)s) (not compatible with violin-width)", type=float, default=0.3, choices=Range(0, 1))

# Box-plot specific options. --box-mode and the gap options are figure-level
# (no ChildAction/parent); the rest apply per input file.
parserBoxPlotOptions = parser.add_argument_group('box plot options')
parserBoxPlotOptions.add_argument("--box-mode", choices=['overlay', 'group'], help="choose boxmode (default %(default)s)", default='overlay')
# NOTE(review): "p99 to p76" reads like an arbitrary percentile example — presumably
# any pXX value is accepted; confirm against the whisker parsing code.
parserBoxPlotOptions.add_argument("--box-whisker", help="choose what the box whisker are representing in precomputed box types (e.g. iqr, minmax or p99 to p76 - percentiles, default %(default)s)", default='minmax', type=str, action=ChildAction, parent=inputFileArgument)
parserBoxPlotOptions.add_argument("--box-type", choices=['raw', 'precomputed'], help="choose whether to precompute boxes or to include all raw values (default %(default)s)", default='precomputed', action=ChildAction, parent=inputFileArgument)
parserBoxPlotOptions.add_argument("--box-mean", choices=['none', 'line', 'dot'], help="choose box mean (default %(default)s)", default='dot', action=ChildAction, parent=inputFileArgument)
parserBoxPlotOptions.add_argument("--box-width", help="box width (default %(default)s)", type=float, default=0, choices=Range(0,), action=ChildAction, parent=inputFileArgument)
parserBoxPlotOptions.add_argument("--box-gap", help="gap between boxes (default %(default)s) (not compatible with box-width)", type=float, default=0.3, choices=Range(0, 1))
parserBoxPlotOptions.add_argument("--box-group-gap", help="gap between box groups (default %(default)s) (not compatible with box-width)", type=float, default=0.3, choices=Range(0, 1))


# Dendrogram options: distance metric, linkage method, and colouring threshold.
# FIX: these arguments were registered on parserBoxPlotOptions (copy-paste), so
# they showed up under "box plot options" in --help while the dendrogram group
# stayed empty. Also balanced the "(default %(default)s)" parentheses.
parserDendrogramPlotOptions = parser.add_argument_group('dendrogram plot options')
parserDendrogramPlotOptions.add_argument("--dendrogram-distance", help="choose the distance function (default %(default)s)", default='euclidean', choices=['chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'jensenshannon', 'kulczynski1', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'], action=ChildAction, parent=inputFileArgument)
parserDendrogramPlotOptions.add_argument("--dendrogram-linkage", help="choose the linkage function (default %(default)s)", default='average', choices=['single' ,'complete', 'average', 'weighted', 'ward', 'centroid', 'median'], action=ChildAction, parent=inputFileArgument)
parserDendrogramPlotOptions.add_argument("--dendrogram-colour-threshold", help="colouring threshold (default %(default)s)", default=None, type=float, action=ChildAction, parent=inputFileArgument)

# Axis options: titles, types, visibility, and autorange per subplot axis.
parserPlotAxisOptions = parser.add_argument_group('plot axis options')
parserPlotAxisOptions.add_argument("--y-secondary", help="plot to secondary y-axis", default=False, sticky_default=True, nargs=0, sub_action="store_true", action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--y-title", help="y-axis title", type=escapedStr, default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-title", help="x-axis title", type=escapedStr, default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--y-title-standoff", help="added margin between tick labels and y-title in px", choices=Range(0,), default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-title-standoff", help="added margin between tick labels and x-title in px", choices=Range(0,), default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--y-type", help="choose type for y-axis (default %(default)s)", choices=['-', 'linear', 'log', 'date', 'category'], default='-', action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-type", help="choose type for x-axis (default %(default)s)", choices=['-', 'linear', 'log', 'date', 'category'], default='-', action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--y-show", help="show y-axis (default %(default)s)", default='auto', sticky_default=True, choices=['on', 'off', 'auto'], action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-show", help="show x-axis (default %(default)s)", default='auto', sticky_default=True, choices=['on', 'off', 'auto'], action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-autorange", help="choose autorange mode for x-axis (default %(default)s)", choices=['on', 'off', 'reversed'], default='on', action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--y-autorange", help="choose autorange mode for y-axis (default %(default)s)", choices=['on', 'off', 'reversed'], default='on', action=ChildAction, parent=inputFileArgument)
# FIX: the two help texts had the axis names swapped (y-option said "x-axis" and vice versa).
parserPlotAxisOptions.add_argument("--y-range-mode", help="choose range mode for y-axis (default %(default)s)", choices=['normal', 'tozero', 'nonnegative'], default='normal', action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-range-mode", help="choose range mode for x-axis (default %(default)s)", choices=['normal', 'tozero', 'nonnegative'], default='normal', action=ChildAction, parent=inputFileArgument)
# Explicit axis ranges (z applies to heatmaps) and tick formatting/placement.
parserPlotAxisOptions.add_argument("--z-range-from", help="z-axis start (default %(default)s)", default='auto', sticky_default=True, choices=Range(None, None, ['auto']), action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--y-range-from", help="y-axis start (default %(default)s)", default='auto', sticky_default=True, choices=Range(None, None, ['auto']), action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-range-from", help="x-axis start (default %(default)s)", default='auto', sticky_default=True, choices=Range(None, None, ['auto']), action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--z-range-to", help="z-axis end (default %(default)s)", default='auto', sticky_default=True, choices=Range(None, None, ['auto']), action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--y-range-to", help="y-axis end (default %(default)s)", default='auto', sticky_default=True, choices=Range(None, None, ['auto']), action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-range-to", help="x-axis end (default %(default)s)", default='auto', sticky_default=True, choices=Range(None, None, ['auto']), action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--y-tick-format", help="set format of y-axis ticks", default='', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-tick-format", help="set format of x-axis ticks", default='', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--y-tick-suffix", help="set suffix to y-axis ticks", default='', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-tick-suffix", help="set suffix to x-axis ticks", default='', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--y-tick-prefix", help="set prefix to y-axis ticks", default='', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-tick-prefix", help="set prefix to x-axis ticks", default='', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--y-ticks", help="where to draw the y-axis ticks (default '%(default)s')", choices=['', 'inside', 'outside'], default='', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-ticks", help="where to draw the x-axis ticks (default '%(default)s')", choices=['', 'inside', 'outside'], default='', sticky_default=True, action=ChildAction, parent=inputFileArgument)
# Tick modes: 'auto' honours --*-nticks, 'linear' honours --*-tick0/--*-dtick,
# 'array' honours --*-tickvals/--*-ticktext.
parserPlotAxisOptions.add_argument("--y-tickmode", help="tick mode y-axis (default '%(default)s')", choices=['auto', 'linear', 'array'], default='auto', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-tickmode", help="tick mode x-axis (default '%(default)s')", choices=['auto', 'linear', 'array'], default='auto', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--y-nticks", help="number of ticks on y-axis (only tick mode auto) (default %(default)s)", choices=Range(0,), default=0, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-nticks", help="number of ticks on x-axis (only tick mode auto) (default %(default)s)", choices=Range(0,), default=0, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--y-tick0", help="first tick on y-axis (only tick mode linear) (default %(default)s)", type=escapedStr, default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-tick0", help="first tick on x-axis (only tick mode linear) (default %(default)s)", type=escapedStr, default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--y-dtick", help="tick step on y-axis (only tick mode linear) (default %(default)s)", type=escapedStr, default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-dtick", help="tick step on x-axis (only tick mode linear) (default %(default)s)", type=escapedStr, default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--y-tickvals", help="tick values on y-axis (only tick mode array) (default %(default)s)", default=[], sticky_default=True, nargs='+', action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-tickvals", help="tick values on x-axis (only tick mode array) (default %(default)s)", default=[], sticky_default=True, nargs='+', action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--y-ticktext", help="tick text on y-axis (only tick mode array) (default %(default)s)", default=[], sticky_default=True, nargs='+', action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-ticktext", help="tick text on x-axis (only tick mode array) (default %(default)s)", default=[], sticky_default=True, nargs='+', action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--y-tickangle", help="tick angle on y-axis (default %(default)s)", default='auto', sticky_default=True, choices=Range(None, None, ['auto']), action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-tickangle", help="tick angle on x-axis (default %(default)s)", default='auto', sticky_default=True, choices=Range(None, None, ['auto']), action=ChildAction, parent=inputFileArgument)
# Axis styling: colours, line widths, grid/zero-line/label visibility, mirroring.
parserPlotAxisOptions.add_argument("--y-grid-colour", help="set y-grid colour", type=escapedStr, default=None, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-grid-colour", help="set x-grid colour", type=escapedStr, default=None, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--y-colour", help="set y-axis colour", type=escapedStr, default=None, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-colour", help="set x-axis colour", type=escapedStr, default=None, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--y-line-width", help="set y-axis line width (default %(default)s)", type=float, choices=Range(0, None), default=0, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-line-width", help="set x-axis line width (default %(default)s)", type=float, choices=Range(0, None), default=0, action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--y-show-grid", help="set the visibility of the y-grid (default %(default)s)", choices=['auto', 'on', 'off'], default='auto', action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-show-grid", help="set the visibility of the x-grid (default %(default)s)", choices=['auto', 'on', 'off'], default='auto', action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--y-show-zero-line", help="set the visibility of the y zero line (default %(default)s)", choices=['auto', 'on', 'off'], default='auto', action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-show-zero-line", help="set the visibility of the x zero line (default %(default)s)", choices=['auto', 'on', 'off'], default='auto', action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--y-show-tick-labels", help="set the visibility of the y tick labels (default %(default)s)", choices=['auto', 'on', 'off'], default='auto', action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-show-tick-labels", help="set the visibility of the x tick labels (default %(default)s)", choices=['auto', 'on', 'off'], default='auto', action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--y-mirror", help="set y axis mirror mode (default %(default)s)", choices=['auto', 'on', 'ticks', 'off' , 'all', 'allticks'], default='auto', action=ChildAction, parent=inputFileArgument)
parserPlotAxisOptions.add_argument("--x-mirror", help="set x axis mirror mode (default %(default)s)", choices=['auto', 'on', 'ticks', 'off' , 'all', 'allticks'], default='auto', action=ChildAction, parent=inputFileArgument)

# Shorthand that applies to both axes' grids (no ChildAction axis split).
parserPlotAxisOptions.add_argument("--grid-colour", help="set grid colour", type=escapedStr, default=None, action=ChildAction, parent=inputFileArgument)

# Colour handling: a seaborn palette generates the colour cycle unless a
# plotly theme is chosen via --theme (then most colour options are ignored).
parserColourOptions = parser.add_argument_group('colour options')
parserColourOptions.add_argument("--theme", help="theme to use (all colour options only apply to 'palette')", default='palette', choices=["palette", "plotly", "plotly_white", "plotly_dark", "ggplot2", "seaborn", "simple_white", "none"])
parserColourOptions.add_argument("--colours", help="define explicit colours (filled up by palette)", default=[], nargs='+', type=escapedStr)
parserColourOptions.add_argument("--palette", help="valid seaborn colour palette (default %(default)s)", type=str, default='ch:s=2.8,rot=0.1,d=0.85,l=0.15')
parserColourOptions.add_argument("--palette-opacity", help="palette colour opacity (default %(default)s)", type=float, choices=Range(0, 1.0), default=1.0)
parserColourOptions.add_argument("--palette-reverse", help="reverse colour palette", action="store_true", default=False)
parserColourOptions.add_argument("--palette-count", help="manually set the number of colours to generate from the palette", type=int, choices=Range(1, None), default=None)
parserColourOptions.add_argument("--palette-start", help="set the palette start index (default %(default)s)", type=int, default=0, choices=Range(0, None))
parserColourOptions.add_argument("--palette-cmap", help="how to generate continous colour maps (default %(default)s)", type=str, default='linear', choices=['linear', 'log', 'rlog'])
# The subplot-* variants override the global palette for a single subplot and
# implicitly switch the colour cycle to 'subplot' (see sanitization below).
parserColourOptions.add_argument("--subplot-colours", help="specify explicit subplot colours (sets default colour cycle to subplot)", type=escapedStr, default=[], nargs='+', sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserColourOptions.add_argument("--subplot-palette", help="valid seaborn colour palette used for this subplot (sets default colour cycle to subplot)", type=str, default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserColourOptions.add_argument("--subplot-palette-opacity", help="subplot palette opacity", type=float, choices=Range(0, 1.0), default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
# NOTE(review): 'sub_action' and 'nargs=0' are non-standard argparse kwargs,
# presumably interpreted by the custom ChildAction to emulate store_true for a
# child option — confirm against the ChildAction implementation.
parserColourOptions.add_argument("--subplot-palette-reverse", help="reverse subplot colour palette", sub_action="store_true", default=None, nargs=0, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserColourOptions.add_argument("--subplot-palette-count", help="manually set the number of colours to generate from the subplot palette (set default colour cycle to subplot)", type=int, choices=Range(1, None), default=None, sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserColourOptions.add_argument("--subplot-palette-start", help="set the subplot palette start index (default 0)", type=int, default=None, choices=Range(0, None), sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserColourOptions.add_argument("--subplot-palette-cmap", help="how to generate continous colour maps (default linear)", type=str, default=None, choices=['linear', 'log', 'rlog'], sticky_default=True, action=ChildAction, parent=inputFileArgument)
parserColourOptions.add_argument("--colour-cycle", help="cycle through colours globally or per subplot (default global)", choices=['subplot', 'global'], default=None, action=ChildAction, parent=inputFileArgument)
parserColourOptions.add_argument("--colour-debug", help="print generated colour palettes", action="store_true", default=False)
parserColourOptions.add_argument("--colour-scale", help="show, reverse or hide continous colour scales (default %(default)s)", choices=['show', 'reverse', 'hide'], default='show', action=ChildAction, parent=inputFileArgument)
parserColourOptions.add_argument("--colour-scale-orientation", help="set the colour scale orientation (default %(default)s)", choices=['vertical', 'v', 'horizontal', 'h'], default='v', action=ChildAction, parent=inputFileArgument)

# Mutually prioritised flags: per-trace wins over per-frame wins over
# per-input (resolved after parsing).
parserColourOptions.add_argument("--per-trace-colours", help="one colour for each trace (default)", action='store_true', default=False)
parserColourOptions.add_argument("--per-frame-colours", help="one colour for each dataframe", action='store_true', default=False)
parserColourOptions.add_argument("--per-input-colours", help="one colour for each input file", action='store_true', default=False)
parserColourOptions.add_argument("--font-colour", help="font colour (default %(default)s)", type=escapedStr, default='#000000')
parserColourOptions.add_argument("--background-colour", help="set background colour  (default 'rgba(255, 255, 255, 0)')", type=escapedStr, default=None)

# Global plot layout options: titles, axis sharing, spacing, fonts, legend
# placement, margins and the overall figure size.
parserPlotGlobalOptions = parser.add_argument_group('plot global options')

parserPlotGlobalOptions.add_argument("--global-trace-names", help="set trace names globally", default=[], type=escapedStr, nargs='+')
parserPlotGlobalOptions.add_argument("--master-title", help="plot master title", type=escapedStr, default=None)
parserPlotGlobalOptions.add_argument("--x-master-title", help="x-axis master title", type=escapedStr, default=None)
parserPlotGlobalOptions.add_argument("--y-master-title", help="y-axis master title", type=escapedStr, default=None)
parserPlotGlobalOptions.add_argument("--x-share", help="share subplot x-axis (default %(default)s)", default=False, action="store_true")
parserPlotGlobalOptions.add_argument("--y-share", help="share subplot y-axis (default %(default)s)", default=False, action="store_true")
parserPlotGlobalOptions.add_argument("--vertical-spacing", type=float, help="vertical spacing between subplots", default=None, choices=Range(0, 1))
parserPlotGlobalOptions.add_argument("--horizontal-spacing", type=float, help="horizontal spacing between subplots", default=None, choices=Range(0, 1))
parserPlotGlobalOptions.add_argument("--font-size", help="font size (default %(default)s)", type=int, default=12)
parserPlotGlobalOptions.add_argument("--font-family", help="font family (default %(default)s)", type=escapedStr, default='"Open Sans", verdana, arial, sans-serif')

# --legend is a quick preset; the fine-grained --legend-x/-y/-anchor options
# below override whatever the preset would derive (see post-parse logic).
parserPlotGlobalOptions.add_argument("--legend", help="quick setting the legend position (default %(default)s)", type=str, choices=['topright', 'topcenter', 'topleft', 'bottomright', 'bottomcenter', 'bottomleft', 'middleleft', 'center', 'middleright', 'belowleft', 'belowcenter', 'belowright', 'aboveleft', 'abovecenter', 'aboveright', 'righttop', 'rightmiddle', 'rightbottom', 'lefttop', 'leftmiddle', 'leftbottom'], default='righttop')
parserPlotGlobalOptions.add_argument("--legend-x", help="x legend position", type=float, choices=Range(-2, 3), default=None)
parserPlotGlobalOptions.add_argument("--legend-y", help="y legend position", type=float, choices=Range(-2, 3), default=None)
parserPlotGlobalOptions.add_argument("--legend-x-anchor", help="set legend xanchor", choices=['auto', 'left', 'center', 'right'], default=None)
parserPlotGlobalOptions.add_argument("--legend-y-anchor", help="set legend yanchor", choices=['auto', 'top', 'bottom', 'middle'], default=None)
parserPlotGlobalOptions.add_argument("--legend-hide", help="hides legend", default=None, action="store_true")
parserPlotGlobalOptions.add_argument("--legend-show", help="forces legend to show up", default=None, action="store_true")
# BUGFIX: the help texts of the two orientation flags were swapped
# (--legend-vertical said "horizontal legend" and vice versa).
parserPlotGlobalOptions.add_argument("--legend-vertical", help="vertical legend", default=None, action="store_true")
parserPlotGlobalOptions.add_argument("--legend-horizontal", help="horizontal legend", default=None, action="store_true")
parserPlotGlobalOptions.add_argument("--legend-order", help="trace order of legend entries (default grouped)", type=str, choices=['normal', 'reversed', 'grouped', 'grouped+reversed', 'reversed+grouped'], default='grouped')
parserPlotGlobalOptions.add_argument("--legend-groupgap", help="gap between legend groups (default %(default)s)", type=int, choices=Range(0, None), default=10)

# Margins default to None so that later code can decide them automatically.
parserPlotGlobalOptions.add_argument("--margins", help="sets all margins", type=int, choices=Range(0, None), default=None)
parserPlotGlobalOptions.add_argument("--margin-l", help="sets left margin", type=int, choices=Range(0, None), default=None)
parserPlotGlobalOptions.add_argument("--margin-r", help="sets right margin", type=int, choices=Range(0, None), default=None)
parserPlotGlobalOptions.add_argument("--margin-t", help="sets top margin", type=int, choices=Range(0, None), default=None)
parserPlotGlobalOptions.add_argument("--margin-b", help="sets bottom margin", type=int, choices=Range(0, None), default=None)
parserPlotGlobalOptions.add_argument("--margin-pad", help="sets padding", type=int, choices=Range(0, None), default=None)

parserPlotGlobalOptions.add_argument("--width", help="plot width", type=int, default=1000)
parserPlotGlobalOptions.add_argument("--height", help="plot height", type=int)

# Output destinations and miscellaneous flags, then the actual parse.
parserOutputOptions = parser.add_argument_group('output options')
parserOutputOptions.add_argument("--addon", help="Specify addon behavior. 'on' will load specified addon files or filename_addon.py if found. 'off' does not load any addon files.", default='on', choices=['on', 'off'])
parserOutputOptions.add_argument("--addon-files", help="Specify addon files to source.", default=None, nargs='+')
parserOutputOptions.add_argument("--browser", help="open plot in the browser", default=False, action="store_true")
parserOutputOptions.add_argument("-o", "--output", help="export plot to file (html, pdf, svg, png, py, ...)", default=[], nargs='+')
parserOutputOptions.add_argument("-q", "--quiet", action="store_true", help="no warnings and don't open output file", default=False)
parserOutputOptions.add_argument("-v", "--version", action="version", version='%(prog)s ' + __version__ + ', available at ' + __url__)
parserOutputOptions.add_argument("--debug", action="store_true", help="output some debug informations", default=False)

# Enable shell tab-completion (PYTHON_ARGCOMPLETE_OK header) before parsing.
argcomplete.autocomplete(parser)
args = parser.parse_args()

violinMode = 'overlay'

# When a plotly theme is selected (anything but 'palette'), the palette-based
# colour settings are commented out of the generated plot script; the explicit
# background colour is kept only if the user supplied one.
usingTheme = args.theme != 'palette'
commentColour = '# ' if usingTheme else ''
commentBackgroundColour = ('' if args.background_colour else '# ') if usingTheme else ''

# Fall back to a fully transparent background when none was given.
if not args.background_colour:
    args.background_colour = 'rgba(255, 255, 255, 0)'

# Resolve the mutually exclusive colour-cycling flags with the priority
# per-trace > per-frame > per-input; per-trace is also the default.
if args.per_trace_colours or not (args.per_frame_colours or args.per_input_colours):
    args.per_trace_colours = True
    args.per_frame_colours = False
    args.per_input_colours = False
elif args.per_frame_colours:
    args.per_input_colours = False

# Sanitize arguments: normalise each input file's raw CLI options into the
# derived fields (quoted colour strings, tri-state booleans, float ranges,
# orientation flags, ...) consumed by the plot generation code below.
for inputEntry in args.input:
    options = inputEntry['args']
    # options.ignore_icolumns = list(set(options.ignore_icolumns))
    # options.ignore_columns = list(set(options.ignore_columns))

    options.traceSpecialColumns = [options.special_column_prefix + x for x in traceSpecialColumns]
    options.frameSpecialColumns = [options.special_column_prefix + x for x in frameSpecialColumns]

    # Overlayed traces default to slight transparency so they remain readable.
    # NOTE(review): box/bar modes are read from the global args while the
    # violin mode is per-input — confirm this asymmetry is intended.
    if (options.opacity == 'auto' and ((options.plot == 'box' and 'overlay' in args.box_mode) or
                                       (options.plot == 'violin' and 'overlay' in options.violin_mode) or
                                       (options.plot == 'bar' and 'overlay' in args.bar_mode))):
        options.opacity = 0.8
    elif options.opacity == 'auto':
        options.opacity = 1.0

    # 'auto' orientation: every plot type except 'line' defaults to vertical.
    if options.orientation == 'auto':
        options.vertical = options.plot != 'line'
    elif options.orientation in ['vertical', 'v']:
        options.vertical = True
    else:
        options.vertical = False
    options.horizontal = not options.vertical

    if len(options.line_markers) == 0:
        options.line_markers = ['circle']

    options.show_error = options.error == 'show'
    options.hide_error = not options.show_error

    # Any subplot-scoped colour option implies a per-subplot colour cycle.
    if options.colour_cycle is None and (len(options.subplot_colours) > 0 or options.subplot_palette is not None or options.subplot_palette_count is not None):
        options.colour_cycle = 'subplot'

    if options.colour_cycle is None:
        options.colour_cycle = 'global'

    # Quote colour strings for the generated plotting script; the per-axis
    # grid colour falls back to the shared --grid-colour.
    options.y_grid_colour = f"'{options.y_grid_colour}'" if options.y_grid_colour is not None else f"'{options.grid_colour}'" if options.grid_colour is not None else None
    options.x_grid_colour = f"'{options.x_grid_colour}'" if options.x_grid_colour is not None else f"'{options.grid_colour}'" if options.grid_colour is not None else None
    # BUGFIX: the axis colours were cross-assigned (x_colour from y_colour,
    # then y_colour from the freshly quoted x_colour, yielding doubly quoted
    # values); each axis colour is now quoted in place.
    options.y_colour = f"'{options.y_colour}'" if options.y_colour is not None else None
    options.x_colour = f"'{options.x_colour}'" if options.x_colour is not None else None
    options.y_line_width_forced = 'y_line_width' in [i for (i, _) in options.ordered_args]
    options.x_line_width_forced = 'x_line_width' in [i for (i, _) in options.ordered_args]

    # auto, on or off -> None, True, False
    translate = {'auto': None, 'on': True, 'off': False}
    for opt in ['y_show_grid', 'x_show_grid', 'y_show', 'x_show', 'y_show_zero_line', 'x_show_zero_line', 'y_show_tick_labels', 'x_show_tick_labels']:
        setattr(options, opt, translate[getattr(options, opt)])

    # auto, on, off or a quoted string (mirror modes 'ticks', 'all', ...)
    for opt in ['y_mirror', 'x_mirror']:
        _check = getattr(options, opt)
        setattr(options, opt, translate[_check] if _check in translate else f"'{_check}'")

    # auto or a float value
    translate = {'auto': None}
    for opt in ['bar_text_angle', 'line_marker_opacity', 'y_range_from', 'x_range_from', 'y_range_to', 'x_range_to', 'y_tickangle', 'x_tickangle', 'bar_width', 'bar_shift']:
        _check = getattr(options, opt)
        setattr(options, opt, translate[_check] if _check in translate else float(_check))

    options.y_tick0 = None if options.y_tick0 is None else f"'{options.y_tick0}'"
    options.x_tick0 = None if options.x_tick0 is None else f"'{options.x_tick0}'"
    options.y_dtick = None if options.y_dtick is None else f"'{options.y_dtick}'"
    options.x_dtick = None if options.x_dtick is None else f"'{options.x_dtick}'"

    # Dendrograms disable autoranging on the axis opposite their orientation.
    options.y_autorange = 'off' if options.plot == 'dendrogram' and options.y_autorange is not None and not options.vertical else options.y_autorange
    options.x_autorange = 'off' if options.plot == 'dendrogram' and options.x_autorange is not None and not options.horizontal else options.x_autorange
    options.y_autorange = f"'{options.y_autorange}'" if options.y_autorange not in ['on', 'off', None] else False if options.y_autorange == 'off' or options.y_range_to is not None or options.y_range_from is not None else True
    options.x_autorange = f"'{options.x_autorange}'" if options.x_autorange not in ['on', 'off', None] else False if options.x_autorange == 'off' or options.x_range_to is not None or options.x_range_from is not None else True

    # Themes show all legend entries by default, palettes only unique ones.
    if options.legend_entries is None:
        options.legend_entries = 'all' if args.theme != 'palette' else 'unique'

    if options.plot != 'line':
        # If explicitly set the range-to the automatic ranging would start at the
        # min value which is confusing, set the from to 0 for all but line plots
        if options.y_range_to is not None and options.y_range_from is None:
            options.y_range_from = 0
        if options.x_range_to is not None and options.x_range_from is None:
            options.x_range_from = 0

    violinMode = options.violin_mode

    # BUGFIX: these branches used '==' (a no-op comparison) instead of '=',
    # so the long orientation names were never normalised to plotly's 'h'/'v'.
    if options.colour_scale_orientation == 'horizontal':
        options.colour_scale_orientation = 'h'
    elif options.colour_scale_orientation == 'vertical':
        options.colour_scale_orientation = 'v'


# Normalise the remaining global options: 'auto' bar gap becomes None,
# master titles are quoted for the generated plot script.
if args.bar_gap == 'auto':
    args.bar_gap = None
else:
    args.bar_gap = float(args.bar_gap)

for _titleAttr in ('master_title', 'y_master_title', 'x_master_title'):
    _title = getattr(args, _titleAttr)
    if _title is not None:
        setattr(args, _titleAttr, f"'{_title}'")

# Resolve show/hide into a consistent pair; an explicit --legend-hide wins.
if not (args.legend_show is None and args.legend_hide is None):
    args.legend_show = not args.legend_hide
    args.legend_hide = not args.legend_show

# Resolve the legend orientation pair if either flag was given explicitly;
# an explicit --legend-horizontal wins.
if not (args.legend_vertical is None and args.legend_horizontal is None):
    args.legend_vertical = not args.legend_horizontal
    args.legend_horizontal = not args.legend_vertical

# Derive concrete legend coordinates and anchors from the --legend preset.
# Explicitly supplied --legend-x/-y/-anchor/orientation values always win
# (each derivation only fills attributes that are still None).
if args.legend is not None:
    # If no legend orientation is set, derive the default from the position:
    # legends above/below the plot are horizontal, legends beside it vertical.
    if args.legend_horizontal is None:
        if args.legend.startswith('top') or args.legend.startswith('bottom') or args.legend.startswith('above') or args.legend.startswith('below'):
            args.legend_horizontal = True
        else:
            args.legend_horizontal = False
    args.legend_vertical = not args.legend_horizontal

    if (args.legend_y_anchor is None):
        if args.legend.startswith('middle') or args.legend.endswith('middle') or args.legend == 'center':
            args.legend_y_anchor = 'middle'
        # 'below*' anchors at its top edge, 'above*' at its bottom edge, so
        # the legend hangs outside the plot area.
        elif args.legend.startswith('top') or args.legend.startswith('below'):
            args.legend_y_anchor = 'top'
        elif args.legend.startswith('bottom') or args.legend.startswith('above'):
            args.legend_y_anchor = 'bottom'

    if (args.legend_x_anchor is None):
        if args.legend.endswith('center') or args.legend == 'center':
            args.legend_x_anchor = 'center'
        # 'left*' presets sit outside the left edge, so they anchor on the
        # legend's right side (and vice versa for 'right*').
        elif args.legend.endswith('right') or args.legend.startswith('left'):
            args.legend_x_anchor = 'right'
        elif args.legend.endswith('left') or args.legend.startswith('right'):
            args.legend_x_anchor = 'left'

    # Paper coordinates: 0..1 is inside the plot, values outside that range
    # place the legend next to it.
    if (args.legend_y is None):
        if args.legend.startswith('middle') or args.legend.endswith('middle') or args.legend == 'center':
            args.legend_y = 0.5
        elif args.legend.startswith('top') or args.legend.endswith('top'):
            args.legend_y = 1.0
        elif args.legend.startswith('bottom') or args.legend.endswith('bottom'):
            args.legend_y = 0.0
        elif args.legend.startswith('above'):
            args.legend_y = 1.0
        elif args.legend.startswith('below'):
            args.legend_y = -0.05

    if (args.legend_x is None):
        if args.legend.endswith('center') or args.legend == 'center':
            args.legend_x = 0.5
        elif args.legend.endswith('left'):
            args.legend_x = 0.0
        elif args.legend.endswith('right'):
            args.legend_x = 1.0
        elif args.legend.startswith('right'):
            args.legend_x = 1.02
        elif args.legend.startswith('left'):
            args.legend_x = -0.05

# Accumulators for the input processing below.
totalTraceCount = totalFrameCount = totalInputCount = 0
# Two independent dicts: [0] tracks rows, [1] tracks columns of the grid.
subplotGrid = [{'min': 1, 'max': 1}, {'min': 1, 'max': 1}]
subplotGridDefinition = {}
data = []

# Margin defaults: None means the value is decided automatically later,
# True/False forces the margin on or off. Master titles need their margin.
defaultBottomMargin = None if args.x_master_title is None else True
defaultLeftMargin = None if args.y_master_title is None else True
defaultRightMargin = None
# These are never set automatically, so they are always True or False.
defaultTopMargin = None if args.master_title is None else True
defaultPadMargin = False

doneSomething = False

# Standard input may be referenced multiple times; count the references so
# the buffered stdin data can be released once the last one is consumed.
# A real file named 'stdin' takes precedence over standard input.
stdinBuf = None
if os.path.exists('stdin'):
    stdinReferenced = 0
else:
    stdinReferenced = sum(1 for entry in args.input for name in entry['value'] if name == 'stdin')

# Read every input file (CSV/TSV text, pickled DataFrame, or stdin) into a
# list of (options, DataFrame) pairs for further processing.
for input in args.input:
    inputOptions = input['args']
    inputFileNames = input['value']
    inputOptions.filenames = inputFileNames
    inputOptions.traceCount = 0
    inputOptions.frameCount = 0
    inputOptions.frameIndex = 0
    inputFrames = []
    for filename in inputFileNames:
        buf = None

        # A real file named 'stdin' shadows standard input (see stdinReferenced).
        if filename == 'stdin' and os.path.exists(filename) and not args.quiet:
            print("WARNING: reading in a file called 'stdin' instead of standard input")

        # NOTE(review): this f-string (and the debug/warning messages below)
        # contains no placeholder — presumably '{filename}' was intended.
        if not os.path.exists(filename) and not filename == 'stdin':
            raise Exception(f'Could not find file (unknown)!')

        if args.debug:
            print(f"DEBUG: reading in (unknown)", file=sys.stderr)

        # Standard input is read once and cached, since several inputs may
        # reference it; the cache is dropped after the last reference.
        if filename == 'stdin' and not os.path.exists(filename):
            if stdinBuf is None:
                stdinBuf = sys.stdin.buffer.read()
            buf = stdinBuf

            stdinReferenced -= 1
            if stdinReferenced <= 0:
               del stdinBuf
               stdinBuf = None
        else:
            # xopen transparently decompresses gz/bz2/xz/zst files.
            buf = xopen.xopen(filename, mode='rb', threads=inputOptions.threads).read()

        # Probe for a pickled DataFrame first; on failure fall through to CSV.
        # HACK: pickle.loads on arbitrary input files can execute code —
        # only feed trusted files to this tool.
        try:
            frame = pickle.loads(buf)
        except Exception:
            frame = None

        # Each file gets its own copy of the input options.
        options = copy.deepcopy(inputOptions)
        options.filenames = [filename]

        if frame is not None:
            # Pickled frames already carry index/columns, so these flags do
            # not apply.
            if not args.quiet and inputOptions.no_index:
                print(f"WARNING: ignoring --no-index for (unknown)", file=sys.stderr)
            if not args.quiet and inputOptions.no_columns:
                print(f"WARNING: ignoring --no-columns for (unknown)", file=sys.stderr)

            # A pickle may hold either a single DataFrame or a list of them.
            if (isinstance(frame, list)):
                for f in frame:
                    if (not isinstance(f, pandas.DataFrame)):
                        raise Exception(f'pickle file (unknown) is not a list of pandas dataframes!')
                    inputFrames.append((copy.deepcopy(options), f))
            elif (not isinstance(frame, pandas.DataFrame)):
                raise Exception(f'pickle file (unknown) is not a pandas data frame!')
            else:
                # NOTE(review): errors='ignore' for to_numeric and applymap
                # are deprecated in recent pandas — verify target version.
                frame = frame.apply(pandas.to_numeric, errors='ignore')
                frame = frame.applymap(lambda x: float(x) if isFloat(x) else numpy.nan if isinstance(x, str) and x.lower() in DataframeActions.considerAsNaN else x)

                frame = DataframeActions.transformColumnIds(frame, ['index'], DataframeActions.transformers['index'])
                frame = DataframeActions.transformRowIds(frame, ['columns'], DataframeActions.transformers['index'])

                inputFrames.append((options, frame))
        else:
            # Do not use pandas own comment option, as we only want to exclude lines that start with a comment character to allow it in the data
            if options.comment is not None and len(options.comment) > 0:
              if args.debug:
                print("DEBUG: parsing out comments", file=sys.stderr)
              buf = b''.join([line for line in io.BytesIO(buf).readlines() if not line.decode().strip(' ').startswith(options.comment)])

            if args.debug:
              print("DEBUG: parsing csv data", file=sys.stderr)

            useDelimiter = options.delimiter
            # FUTURE: pyarrow
            # useDelimiter = options.delimiter if options.delimiter is not None else csv.Sniffer().sniff(io.BytesIO(buf).read(4096).decode()).delimiter

            # Everything is read as object first; headers and numeric types
            # are resolved manually below.
            emptyBuf = len(buf) == 0 or buf == b'\n'
            frame = pandas.DataFrame() if emptyBuf else pandas.read_csv(io.BytesIO(buf),
                                    sep=useDelimiter,
                                    header=None,
                                    index_col=0 if not options.no_index else None,
                                    dtype=object,
                                    engine='python'
                                    # FUTURE: pyarrow
                                    # engine='pyarrow'
                    )

            # Promote the first row to column labels / first cell to index name.
            if not options.no_index and not emptyBuf:
                frame.index.name = frame.iloc[0].name

            if not options.no_columns and not emptyBuf:
                frame.columns = frame.iloc[0]
                frame.columns.name = None
                frame = DataframeActions.dropRowsByIds(frame, [0])

            if args.debug:
              print("DEBUG: parsing number formats", file=sys.stderr)
            # Relatively slow, though I don't see a way around it
            frame = frame.apply(pandas.to_numeric, errors='ignore')
            frame = frame.applymap(DataframeActions.transformers['float'])

            frame = DataframeActions.transformColumnIds(frame, ['index'], DataframeActions.transformers['index'])
            frame = DataframeActions.transformRowIds(frame, ['columns'], DataframeActions.transformers['index'])

            options.frameCount = 1
            options.frameIndex = inputOptions.frameCount
            inputFrames.append((options, frame))
            inputOptions.frameCount += 1

        # Release the raw file buffer before reading the next file.
        del buf
        gc.collect()
        if args.debug:
            print("DEBUG: done reading file", file=sys.stderr)

    # Stateful modifiers for the ordered per-frame actions below; each can be
    # changed mid-stream by its corresponding option.
    selectMode = 'all'
    normaliseFunction = [DataframeActions.functions['soloComputeFunctions'][0]]
    sortFunction = ['none']
    addAt = 'back'
    addFunction = [DataframeActions.functions['soloComputeFunctions'][0]]
    applyMode = 'normal'
    applyFunction = [DataframeActions.functions['pandasSelfFunctions'][0]]
    groupFunction = [DataframeActions.functions['soloComputeFunctions'][0]]
    sortOrder = 'asc'
    outputPrecision = None

    # Indices of the frames that actions currently apply to (focus/defocus).
    focusedFrames = list(range(len(inputFrames)))

    for (optionName, optionValue) in input['args'].ordered_args:
        multiFrameActions = ['print_precision', 'select_mode', 'normalise_function', 'sort_function', 'sort_order', 'add_at', 'add_function', 'apply_mode', 'apply_function', 'group_function', 'join', 'file', 'pickle', 'split_column', 'split_icolumn', 'split_row', 'split_irow', 'focus_iframes', 'defocus_iframes', 'focus_frames', 'defocus_frames', 'select_iframes', 'select_frames', 'ignore_iframes', 'ignore_frames', 'frame_names']
        if optionName not in multiFrameActions:
            for _index, (frameOptions, frame) in enumerate(inputFrames):
                if (_index) not in focusedFrames:
                    continue
                if optionName == 'transpose':
                    frame = DataframeActions.transpose(frame)
                elif optionName == 'index_column' or optionName == 'columns_row':
                    if optionName == 'columns_row':
                        frame = frame.T
                    columnIds = DataframeActions.getColumnIds(frame, optionValue, selectMode)
                    if (len(columnIds) > 0):
                        frame = DataframeActions.setIndexColumnByIdx(frame, columnIds[0], True)
                    if optionName == 'columns_row':
                        frame = frame.T
                elif optionName == 'index_icolumn' or optionName == 'columns_irow':
                    if optionName == 'columns_irow':
                        frame = frame.T
                    columnIds = DataframeActions.sliceToColumnIds(frame, optionValue)
                    if (len(columnIds) > 0):
                        frame = DataframeActions.setIndexColumnByIdx(frame, columnIds[0], True)
                    if optionName == 'columns_irow':
                        frame = frame.T
                elif optionName == 'reset_index':
                    frame = DataframeActions.resetIndex(frame)
                elif optionName == 'drop_index':
                    frame = DataframeActions.dropIndex(frame)
                elif optionName == 'reset_columns':
                    frame = DataframeActions.resetColumns(frame)
                elif optionName == 'drop_columns':
                    frame = DataframeActions.dropColumns(frame)
                elif optionName == 'ignore_columns':
                    columnIds = DataframeActions.getColumnIds(frame, optionValue, selectMode, True)
                    frame = DataframeActions.dropColumnsByIds(frame, columnIds)
                elif optionName == 'ignore_icolumns':
                    columnIds = DataframeActions.sliceToColumnIds(frame, optionValue)
                    frame = DataframeActions.dropColumnsByIds(frame, columnIds)
                elif optionName == 'ignore_rows':
                    rowIds = DataframeActions.getRowIds(frame, optionValue, selectMode, True)
                    frame = DataframeActions.dropRowsByIds(frame, rowIds)
                elif optionName == 'ignore_irows':
                    rowIds = DataframeActions.sliceToRowIds(frame, optionValue)
                    frame = DataframeActions.dropRowsByIds(frame, rowIds)
                elif optionName == 'select_columns':
                    columnIds = DataframeActions.getColumnIds(frame, optionValue, selectMode, True)
                    frame = DataframeActions.selectColumnsByIds(frame, columnIds)
                elif optionName == 'select_icolumns':
                    columnIds = DataframeActions.sliceToColumnIds(frame, optionValue)
                    frame = DataframeActions.selectColumnsByIds(frame, columnIds)
                elif optionName == 'select_rows':
                    rowIds = DataframeActions.getRowIds(frame, optionValue, selectMode, True)
                    frame = DataframeActions.selectRowsByIds(frame, rowIds)
                # Positional row selection: optionValue is a slice over row indices.
                elif optionName == 'select_irows':
                    rowIds = DataframeActions.sliceToRowIds(frame, optionValue)
                    frame = DataframeActions.selectRowsByIds(frame, rowIds)
                # Filter rows by the values of a named column.
                # optionValue layout: [column, condition, data...] -> needs >= 3 entries.
                elif optionName == 'filter_column':
                    columnIds = DataframeActions.getColumnIds(frame, optionValue[0], selectMode, True)
                    if len(columnIds) == 0:
                        if not args.quiet:
                            print(f"WARNING: column '{optionValue[0]}' not found", file=sys.stderr)
                    elif len(optionValue) < 3:
                        if not args.quiet:
                            print('WARNING: filter condition and/or filter data not provided', file=sys.stderr)
                    else:
                        frame = DataframeActions.filterRowsByColumnData(frame, columnIds, optionValue[1:])
                # Filter rows by positional column(s); the special value 'index'
                # filters on the frame's index (index is temporarily turned into
                # column 0 and restored afterwards).
                elif optionName == 'filter_icolumns':
                    if optionValue[0] != 'index' and not isSliceType(optionValue[0]):
                        if not args.quiet:
                            print('WARNING: invalid slice provided to filter data', file=sys.stderr)
                    elif len(optionValue) < 3:
                        if not args.quiet:
                            print('WARNING: filter condition and/or filter data not provided', file=sys.stderr)
                    elif optionValue[0] == 'index':
                        frame = DataframeActions.filterRowsByColumnData(DataframeActions.resetIndex(frame), [0], optionValue[1:])
                        frame = DataframeActions.setIndexColumnByIdx(frame, 0, True)
                    else:
                        columnIds = DataframeActions.sliceToColumnIds(frame, SliceType()(optionValue[0]))
                        frame = DataframeActions.filterRowsByColumnData(frame, columnIds, optionValue[1:])
                # Filter columns by the values of a named row (transposed analogue
                # of filter_column).
                elif optionName == 'filter_row':
                    rowIds = DataframeActions.getRowIds(frame, optionValue[0], selectMode, True)
                    if len(rowIds) == 0:
                        if not args.quiet:
                            print(f"WARNING: row '{optionValue[0]}' not found", file=sys.stderr)
                    elif len(optionValue) < 3:
                        if not args.quiet:
                            print('WARNING: filter condition and/or filter data not provided', file=sys.stderr)
                    else:
                        frame = DataframeActions.filterColumnsByRowData(frame, rowIds, optionValue[1:])
                # Filter columns by positional row(s); 'columns' targets the header
                # row (handled via a transpose + index reset round-trip).
                elif optionName == 'filter_irows':
                    if optionValue[0] != 'columns' and not isSliceType(optionValue[0]):
                        if not args.quiet:
                            print('WARNING: invalid slice provided to filter data', file=sys.stderr)
                    elif len(optionValue) < 3:
                        if not args.quiet:
                            print('WARNING: filter condition and/or filter data not provided', file=sys.stderr)
                    elif optionValue[0] == 'columns':
                        frame = DataframeActions.filterRowsByColumnData(DataframeActions.resetIndex(frame.T), [0], optionValue[1:])
                        frame = DataframeActions.setIndexColumnByIdx(frame, 0, True).T
                    else:
                        rowIds = DataframeActions.sliceToRowIds(frame, SliceType()(optionValue[0]))
                        frame = DataframeActions.filterColumnsByRowData(frame, rowIds, optionValue[1:])
                # Axis reordering; sortFunction/sortOrder are set earlier by the
                # 'sort_function' / 'sort_order' options.
                elif optionName == 'reverse_columns':
                    frame = DataframeActions.reverseColumns(frame)
                elif optionName == 'reverse_rows':
                    frame = DataframeActions.reverseRows(frame)
                elif optionName == 'sort_columns':
                    frame = DataframeActions.sortColumns(frame, sortFunction, sortOrder)
                elif optionName == 'sort_rows':
                    frame = DataframeActions.sortRows(frame, sortFunction, sortOrder)
                # Sort rows by positional column keys; the special key 'index'
                # materialises the index as column 0 (hence the +1 offsets) and
                # restores it after sorting.
                elif optionName == 'sort_by_icolumns':
                    if 'index' in optionValue:
                      columnIds = [v for lv in [[0] if ov == 'index' else [x + 1 for x in DataframeActions.sliceToColumnIds(frame, ov)] for ov in optionValue] for v in lv]
                      frame = DataframeActions.resetIndex(frame)
                    else:
                      columnIds = DataframeActions.sliceToColumnIds(frame, optionValue)
                    frame = DataframeActions.sortRowsByColumnIds(frame, columnIds, sortFunction, sortOrder)
                    if 'index' in optionValue:
                      frame = DataframeActions.setIndexColumnByIdx(frame, 0, True)
                # Transposed analogue: sort columns by positional row keys, with
                # 'columns' standing in for the header row.
                elif optionName == 'sort_by_irows':
                    if 'columns' in optionValue:
                      rowIds = [v for lv in [[0] if ov == 'columns' else [x + 1 for x in DataframeActions.sliceToRowIds(frame, ov)] for ov in optionValue] for v in lv]
                      frame = DataframeActions.resetColumns(frame)
                    else:
                      rowIds = DataframeActions.sliceToRowIds(frame, optionValue)
                    frame = DataframeActions.sortColumnsByRowIds(frame, rowIds, sortFunction, sortOrder)
                    if 'columns' in optionValue:
                      frame = DataframeActions.setColumnsRowByIdx(frame, 0, True)
                # Sort by named columns/rows.
                elif optionName == 'sort_by_columns':
                    columnIds = DataframeActions.getColumnIds(frame, optionValue, selectMode)
                    frame = DataframeActions.sortRowsByColumnIds(frame, columnIds, sortFunction, sortOrder)
                elif optionName == 'sort_by_rows':
                    rowIds = DataframeActions.getRowIds(frame, optionValue, selectMode)
                    frame = DataframeActions.sortColumnsByRowIds(frame, rowIds, sortFunction, sortOrder)
                # Normalisation; normaliseFunction is set earlier by the
                # 'normalise_function' option.
                elif optionName == 'normalise_to':
                    frame = DataframeActions.normaliseToConstant(frame, optionValue)
                elif optionName == 'normalise_icolumns':
                    applyColumnIds = DataframeActions.sliceToColumnIds(frame, optionValue)
                    frame = DataframeActions.normaliseColumnIds(frame, applyColumnIds, normaliseFunction)
                elif optionName == 'normalise_columns':
                    applyColumnIds = DataframeActions.getColumnIds(frame, optionValue, selectMode)
                    frame = DataframeActions.normaliseColumnIds(frame, applyColumnIds, normaliseFunction)
                elif optionName == 'normalise_irows':
                    applyRowIds = DataframeActions.sliceToRowIds(frame, optionValue)
                    frame = DataframeActions.normaliseRowIds(frame, applyRowIds, normaliseFunction)
                elif optionName == 'normalise_rows':
                    applyRowIds = DataframeActions.getRowIds(frame, optionValue, selectMode)
                    frame = DataframeActions.normaliseRowIds(frame, applyRowIds, normaliseFunction)
                # Normalise every column/row by dividing through one reference
                # column/row (implemented as an element-wise 'div' apply).
                elif optionName == 'normalise_to_column':
                    applyColumnIds = DataframeActions.getColumnIds(frame, optionValue, selectMode)
                    if len(applyColumnIds) > 1 and not args.quiet:
                        print('WARNING: will only normalise to the first column selected', file=sys.stderr)
                    targetColumnIds = DataframeActions.sliceToColumnIds(frame, slice(None))
                    if len(applyColumnIds) > 0 and len(targetColumnIds) > 0:
                        frame = DataframeActions.applyOnColumns(frame, applyColumnIds[0], targetColumnIds, ['div'])
                elif optionName == 'normalise_to_icolumn':
                    applyColumnIds = DataframeActions.sliceToColumnIds(frame, optionValue)
                    if (len(applyColumnIds) > 1 and not args.quiet):
                        print('WARNING: will only normalise to the first single column', file=sys.stderr)
                    targetColumnIds = DataframeActions.sliceToColumnIds(frame, slice(None))
                    if len(applyColumnIds) > 0 and len(targetColumnIds) > 0:
                        frame = DataframeActions.applyOnColumns(frame, applyColumnIds[0], targetColumnIds, ['div'])
                elif optionName == 'normalise_to_row':
                    applyRowIds = DataframeActions.getRowIds(frame, optionValue, selectMode)
                    if len(applyRowIds) > 1 and not args.quiet:
                        print('WARNING: will only normalise to the first row selected', file=sys.stderr)
                    targetRowIds = DataframeActions.sliceToRowIds(frame, slice(None))
                    if len(applyRowIds) > 0 and len(targetRowIds) > 0:
                        frame = DataframeActions.applyOnRows(frame, applyRowIds[0], targetRowIds, ['div'])
                elif optionName == 'normalise_to_irow':
                    applyRowIds = DataframeActions.sliceToRowIds(frame, optionValue)
                    if (len(applyRowIds) > 1 and not args.quiet):
                        print('WARNING: will only normalise to the first row selected', file=sys.stderr)
                    targetRowIds = DataframeActions.sliceToRowIds(frame, slice(None))
                    if len(applyRowIds) > 0 and len(targetRowIds) > 0:
                        frame = DataframeActions.applyOnRows(frame, applyRowIds[0], targetRowIds, ['div'])
                # Take the absolute value of all data.
                elif optionName == 'abs':
                    frame = DataframeActions.abs(frame)
                # Apply applyFunction (set by 'apply_function') from source rows
                # onto target rows. The special source 'columns' uses the header
                # row: it is materialised as row 0 (hence the +1 offsets), cast
                # to float when the function computes values, and restored after.
                elif optionName == 'apply_irows':
                  targetComputes = applyFunction[0] not in DataframeActions.functions['nonComputeFunctions']
                  if 'columns' in optionValue:
                    applyRowIds = [0] if optionValue[0] == 'columns' else [x + 1 for x in DataframeActions.sliceToRowIds(frame, optionValue[0])]
                    targetRowIds = [v for lV in [[x + 1 for x in DataframeActions.sliceToRowIds(frame, oV)] if oV != 'columns' else [0] for oV in optionValue[1:]] for v in lV] if len(optionValue) > 1 else []
                    frame = DataframeActions.resetColumns(frame, DataframeActions.transformers['float'] if targetComputes else DataframeActions.transformers['identity'])
                  else:
                    applyRowIds = DataframeActions.sliceToRowIds(frame, optionValue[0])
                    targetRowIds = DataframeActions.sliceToRowIds(frame, optionValue[1:]) if len(optionValue) > 1 else []
                  frame = DataframeActions.applyOnRows(frame, applyRowIds, targetRowIds, applyFunction, applyMode)
                  if 'columns' in optionValue:
                    frame = DataframeActions.setColumnsRowByIdx(frame, 0, True, DataframeActions.transformers['index'] if targetComputes else DataframeActions.transformers['str'])
                # Named-row variant (no 'columns' special case).
                elif optionName == 'apply_rows':
                  applyRowIds = DataframeActions.getRowIds(frame, optionValue[0])
                  targetRowIds = DataframeActions.getRowIds(frame, optionValue[1:]) if len(optionValue) > 1 else []
                  frame = DataframeActions.applyOnRows(frame, applyRowIds, targetRowIds, applyFunction, applyMode)
                # Column analogue: 'index' stands in for the frame's index.
                elif optionName == 'apply_icolumns':
                  targetComputes = applyFunction[0] not in DataframeActions.functions['nonComputeFunctions']
                  if 'index' in optionValue:
                    applyColumnIds = [0] if optionValue[0] == 'index' else DataframeActions.sliceToColumnIds(frame, optionValue[0])
                    targetColumnIds = [v for lV in [[x + 1 for x in DataframeActions.sliceToColumnIds(frame, oV)] if oV != 'index' else [0] for oV in optionValue[1:]] for v in lV] if len(optionValue) > 1 else []
                    frame = DataframeActions.resetIndex(frame, DataframeActions.transformers['float'] if targetComputes else DataframeActions.transformers['identity'])
                  else:
                    applyColumnIds = DataframeActions.sliceToColumnIds(frame, optionValue[0])
                    targetColumnIds = DataframeActions.sliceToColumnIds(frame, optionValue[1:]) if len(optionValue) > 1 else []
                  frame = DataframeActions.applyOnColumns(frame, applyColumnIds, targetColumnIds, applyFunction, applyMode)
                  if 'index' in optionValue:
                    frame = DataframeActions.setIndexColumnByIdx(frame, 0, True, DataframeActions.transformers['index'] if targetComputes else DataframeActions.transformers['str'])
                elif optionName == 'apply_columns':
                    applyColumnIds = DataframeActions.getColumnIds(frame, optionValue[0])
                    targetColumnIds = DataframeActions.getColumnIds(frame, optionValue[1:]) if len(optionValue) > 1 else []
                    frame = DataframeActions.applyOnColumns(frame, applyColumnIds, targetColumnIds, applyFunction, applyMode)
                # Grouping with groupFunction (set by 'group_function'); the
                # 'columns'/'index' specials again materialise the header/index
                # as position 0, shifting the remaining ids by +1.
                elif optionName == 'group_by_irows':
                    if 'columns' in optionValue:
                      rowIds = [0] + [x + 1 for x in DataframeActions.sliceToRowIds(frame, [x for x in optionValue if x != 'columns'])]
                      frame = DataframeActions.resetColumns(frame)
                    else:
                      rowIds = DataframeActions.sliceToRowIds(frame, optionValue)
                    frame = DataframeActions.groupByRowIds(frame, rowIds, groupFunction, options.special_column_prefix)
                elif optionName == 'group_by_icolumns':
                    if 'index' in optionValue:
                      columnIds = [0] + [x + 1 for x in DataframeActions.sliceToColumnIds(frame, [x for x in optionValue if x != 'index'])]
                      frame = DataframeActions.resetIndex(frame)
                    else:
                      columnIds = DataframeActions.sliceToColumnIds(frame, optionValue)
                    frame = DataframeActions.groupByColumnIds(frame, columnIds, groupFunction, options.special_column_prefix)
                elif optionName == 'group_by_rows':
                    rowIds = DataframeActions.getRowIds(frame, optionValue, selectMode)
                    frame = DataframeActions.groupByRowIds(frame, rowIds, groupFunction, options.special_column_prefix)
                elif optionName == 'group_by_columns':
                    columnIds = DataframeActions.getColumnIds(frame, optionValue, selectMode)
                    frame = DataframeActions.groupByColumnIds(frame, columnIds, groupFunction, options.special_column_prefix)
                # Collapse the whole frame into a single column/row, or into
                # groups of N columns/rows.
                elif optionName == 'group_to_column':
                  frame = DataframeActions.groupAllColumns(frame, optionValue, groupFunction)
                elif optionName == 'group_to_row':
                    frame = DataframeActions.groupAllRows(frame, optionValue, groupFunction)
                elif optionName == 'group_n_columns':
                  frame = DataframeActions.groupNColumns(frame, optionValue, groupFunction, args.quiet)
                elif optionName == 'group_n_rows':
                    frame = DataframeActions.groupNRows(frame, optionValue, groupFunction, args.quiet)
                elif optionName in ['stack_irow', 'stack_row', 'unstack_irow', 'unstack_row']:
                  if optionName.endswith('irow'):
                    rowIds = ['columns'] if optionValue == 'columns' else DataframeActions.sliceToRowIds(frame, optionValue)
                  else:
                    rowIds = DataframeActions.getRowIds(frame, optionValue, selectMode)
                  if len(rowIds) > 1 and not args.quiet:
                    print(f"WARNING: will only {optionName.split('_')[0]} first row selected", file=sys.stderr)
                  if len(rowIds) == 0:
                    if not args.quiet:
                      print(f"WARNING: could not find row'{optionValue}' to {optionName.split('_')[0]}", file=sys.stderr)
                  else:
                    if optionName.startswith('stack'):
                      frame = DataframeActions.stackRowId(frame, rowIds[0])
                    else:
                      frame = DataframeActions.unstackRowId(frame, rowIds[0])
                elif optionName in ['stack_icolumn', 'stack_column', 'unstack_icolumn', 'unstack_column']:
                  if optionName.endswith('icolumn'):
                    columnIds = ['index'] if optionValue == 'index' else DataframeActions.sliceToColumnIds(frame, optionValue)
                  else:
                    columnIds = DataframeActions.getColumnIds(frame, optionValue, selectMode)
                  if len(columnIds) > 1 and not args.quiet:
                    print(f"WARNING: will only {optionName.split('_')[0]} first column selected", file=sys.stderr)
                  if len(columnIds) == 0:
                    if not args.quiet:
                      print(f"WARNING: could not find column'{optionValue}' to {optionName.split('_')[0]}", file=sys.stderr)
                  else:
                    if optionName.startswith('stack'):
                      frame = DataframeActions.stackColumnId(frame, columnIds[0])
                    else:
                      frame = DataframeActions.unstackColumnId(frame, columnIds[0])
                # Add a synthetic column/row (addFunction/addAt come from the
                # 'add_function'/'add_at' options) or shift/scale all data.
                elif optionName == 'add_column':
                    frame = DataframeActions.addColumn(frame, optionValue, addFunction, addAt)
                elif optionName == 'add_row':
                    frame = DataframeActions.addRow(frame, optionValue, addFunction, addAt)
                elif optionName == 'data_offset':
                    frame = DataframeActions.addConstant(frame, optionValue)
                elif optionName == 'data_scale':
                    frame = DataframeActions.scaleConstant(frame, optionValue)
                # Rename all columns from the left with the provided names.
                elif optionName == 'column_names':
                    frame = DataframeActions.renameColumns(frame, list(range(frame.shape[1])), optionValue)
                elif optionName == 'icolumn_names':
                    if optionValue[0] != 'index' or not isSliceType(optionValue[0]):
                        if not args.quiet:
                            print(f'WARNING: invalid index or slice provided to {optionName}', file=sys.stderr)
                    if len(optionValue) > 1:
                        columnIds = ['index'] if optionValue[0] == 'index' else DataframeActions.sliceToColumnIds(frame, SliceType()(optionValue[0]))
                        frame = DataframeActions.renameColumns(frame, columnIds, optionValue[1:])
                # Rename all rows from the top with the provided names.
                elif optionName == 'row_names':
                    frame = DataframeActions.renameRows(frame, list(range(frame.shape[0])), optionValue)
                elif optionName == 'irow_names':
                    if optionValue[0] != 'columns' or not isSliceType(optionValue[0]):
                        if not args.quiet:
                            print(f'WARNING: invalid index or slice provided to {optionName}', file=sys.stderr)
                    if len(optionValue) > 1:
                        rowIds = ['columns'] if optionValue[0] == 'columns' else DataframeActions.sliceToRowIds(frame, SliceType()(optionValue[0]))
                        frame = DataframeActions.renameRows(frame, rowIds, optionValue[1:])
                # Drop rows/columns that are entirely (or, for drop_any_nan,
                # partially) NaN.
                elif optionName == 'drop_nan':
                    frame = DataframeActions.dropNaN(frame)
                elif optionName == 'drop_any_nan':
                    frame = DataframeActions.dropNaN(frame, True)
                # Print the current frame; marks that a terminal action happened
                # so the script may exit without plotting.
                elif optionName == 'print':
                    DataframeActions.printFrames(frameOptions.filenames, frame, _index, len(inputFrames), outputPrecision)
                    doneSomething = True

                # Write the (possibly replaced) frame back into the frame list.
                inputFrames[_index] = (frameOptions, frame)
        else:
            # Frame-level options: operate on the list of frames rather than on
            # a single frame's data.
            # Focusing restricts which frames subsequent per-frame options touch,
            # without removing any frame from the list.
            if optionName in ['focus_frames', 'defocus_frames', 'focus_iframes', 'defocus_iframes']:
                if optionName in ['focus_frames', 'defocus_frames']:
                    optionValue = getSliceTypesFromList([f.index.name for (_, f) in inputFrames], optionValue, selectMode)

                if optionName in ['focus_frames', 'focus_iframes']:
                    focusedFrames = sorted(getSliceTypeIds(range(len(inputFrames)), optionValue))
                else:
                    focusedFrames = [f for f in focusedFrames if f not in getSliceTypeIds(range(len(inputFrames)), optionValue)]
            # Selecting/ignoring actually drops frames and refocuses everything.
            elif optionName in ['select_frames', 'select_iframes', 'ignore_frames', 'ignore_iframes']:
                if optionName in ['select_frames', 'ignore_frames']:
                    optionValue = getSliceTypesFromList([f.index.name for (_, f) in inputFrames], optionValue, selectMode)

                if optionName in ['select_frames', 'select_iframes']:
                    inputFrames = [(copy.deepcopy(inputFrames[i][0]), inputFrames[i][1].copy()) for i in getSliceTypeIds(range(len(inputFrames)), optionValue)]
                else:
                    inputFrames = [x for i, x in enumerate(inputFrames) if i not in getSliceTypeIds(range(len(inputFrames)), optionValue)]
                focusedFrames = list(range(len(inputFrames)))
            # Name the focused frames (stored as the frame's index name).
            elif optionName == 'frame_names':
                for i, iF in enumerate(focusedFrames):
                    if i >= len(optionValue):
                        break
                    inputFrames[iF][1].index.name = optionValue[i]
            # Mode-setting options: remembered and used by later data options.
            elif optionName == 'print_precision':
                outputPrecision = None if optionValue == 'default' else int(optionValue)
            elif optionName == 'select_mode':
                selectMode = optionValue
            elif optionName == 'normalise_function':
                normaliseFunction = optionValue
            elif optionName == 'sort_function':
                sortFunction = optionValue
            elif optionName == 'sort_order':
                sortOrder = optionValue
            elif optionName == 'add_at':
                addAt = optionValue
            elif optionName == 'add_function':
                addFunction = optionValue
            elif optionName == 'apply_mode':
                applyMode = optionValue
            elif optionName == 'apply_function':
                applyFunction = optionValue
            elif optionName == 'group_function':
                groupFunction = optionValue
            elif optionName == 'join':
              if optionValue[0] not in ['index', 'columns']:
                raise 'ERROR: first parameter to join must be either \'index\' or \'columns\''
              if len(optionValue) > 1 and optionValue[1] not in ['outer', 'inner']:
                raise 'ERROR: second parameter to join must be either \'outer\' or \'inner\''
              newOptions = copy.deepcopy(inputOptions)
              newOptions.filenames = []
              newOptions.frameCount = 1
              frontDefocusedFrames = [inputFrames[x] for x in range(focusedFrames[0]) if x not in focusedFrames]
              backDefocusedFrames = [inputFrames[x] for x in range(focusedFrames[0], len(inputFrames)) if x not in focusedFrames]
              joinedFrame = DataframeActions.joinFrames([frame for (_, frame) in [inputFrames[x] for x in focusedFrames]], optionValue[0], 'outer' if len(optionValue) < 2 else optionValue[1])
              inputFrames = frontDefocusedFrames + [(newOptions, joinedFrame)] + backDefocusedFrames
              focusedFrames = [len(frontDefocusedFrames)]
            # Export the focused frames to CSV or pickle; both count as a
            # terminal action (doneSomething).
            elif optionName == 'file':
                DataframeActions.framesToCSV([frame for (_, frame) in [inputFrames[x] for x in focusedFrames]], optionValue, inputOptions.delimiter, args.quiet, inputOptions.threads, outputPrecision)
                doneSomething = True
            elif optionName == 'pickle':
                DataframeActions.framesToPickle([frame for (_, frame) in [inputFrames[x] for x in focusedFrames]], optionValue, args.quiet, inputOptions.threads)
                doneSomething = True
            # Split every focused frame into multiple frames at a column/row
            # boundary; unfocused frames pass through untouched and all new
            # frames become the focused set.
            elif optionName in ['split_column', 'split_icolumn', 'split_row', 'split_irow']:
                newInputFrames = []
                newFocusedFrames = []
                for _index, (frameOptions, frame) in enumerate(inputFrames):
                    if (_index) not in focusedFrames:
                        newInputFrames.append((frameOptions, frame))
                        continue
                    newFrames = []
                    if optionName == 'split_icolumn':
                        newFrames = DataframeActions.splitFramesByColumnIdx([frame], optionValue)
                    elif optionName == 'split_column':
                        columnIds = DataframeActions.getColumnIds(frame, optionValue, selectMode)
                        newFrames = DataframeActions.splitFramesByColumnIdx([frame], columnIds[0])
                    elif optionName == 'split_irow':
                        newFrames = DataframeActions.splitFramesByRowIdx([frame], optionValue)
                    elif optionName == 'split_row':
                        rowIds = DataframeActions.getRowIds(frame, optionValue, selectMode)
                        newFrames = DataframeActions.splitFramesByRowIdx([frame], rowIds[0])
                    newFocusedFrames.extend(range(len(newInputFrames), len(newInputFrames) + len(newFrames)))
                    for newFrame in newFrames:
                        # Each split result gets a fresh options copy with no
                        # source files attached.
                        newOptions = copy.deepcopy(inputOptions)
                        newOptions.filenames = []
                        newOptions.frameCount = 1
                        newInputFrames.append((newOptions, newFrame))
                focusedFrames = newFocusedFrames
                inputFrames = newInputFrames

        if args.debug:
            print(f"DEBUG: applied {optionName}", file=sys.stderr)
        # Frames can be large; reclaim memory after every applied option.
        gc.collect()

    # Frames still defocused after all options were applied never make it into
    # the plot; warn so the user notices.
    defocusedFrames = [i for i in range(len(inputFrames)) if i not in focusedFrames]
    if not args.quiet and len(defocusedFrames) > 0:
        print(f'WARNING: {len(defocusedFrames)} frames are defocused and will be ignored', file=sys.stderr)

    # Count plottable frames/traces (special columns don't become traces) and
    # sanitise the data: mixed-type labels, +/-inf -> NaN -> None.
    inputOptions.traceCount = 0
    inputOptions.frameCount = 0
    for _index, (options, frame) in enumerate(inputFrames):
        if _index not in focusedFrames:
            continue
        inputOptions.frameCount += 1
        inputOptions.traceCount += len([x for x in frame.columns if str(x) not in inputOptions.traceSpecialColumns and str(x) not in inputOptions.frameSpecialColumns])
        frame = DataframeActions.transformRowIds(DataframeActions.transformColumnIds(frame, ['index'], DataframeActions.transformers['mixed']), ['columns'], DataframeActions.transformers['mixed'])
        frame = frame.replace([numpy.inf, -numpy.inf], numpy.nan)
        frame = frame.where(pandas.notnull(frame), None)

        inputFrames[_index] = (options, frame)

    if inputOptions.frameCount == 0:
        if not args.quiet:
            print(f'WARNING: files {", ".join(inputFileNames)} did turn into any valid dataframes', file=sys.stderr)
        continue

    # Global bookkeeping across all inputs.
    inputOptions.inputIndex = totalInputCount
    totalTraceCount += inputOptions.traceCount
    totalFrameCount += inputOptions.frameCount
    totalInputCount += 1

    # Grow the subplot grid to cover this input's cell (including spans) and
    # create the cell's definition on first use.
    updateRange(subplotGrid, [inputOptions.col + (inputOptions.colspan - 1), inputOptions.row + (inputOptions.rowspan - 1)])
    if (inputOptions.row not in subplotGridDefinition):
        subplotGridDefinition[inputOptions.row] = {}
    if (inputOptions.col not in subplotGridDefinition[inputOptions.row]):
        subplotGridDefinition[inputOptions.row][inputOptions.col] = copy.deepcopy({
            'rowspan': inputOptions.rowspan,
            'colspan': inputOptions.colspan,
            'secondary_y': inputOptions.y_secondary,
            'title': inputOptions.title,
            'traces': 0,
            'frames': 0,
            'colours': args.colours,
            'palette': args.palette,
            'palette_count': args.palette_count,
            'palette_local': inputOptions.colour_cycle == 'subplot',
            'palette_reverse': args.palette_reverse,
            'palette_index': args.palette_start,
            'palette_opacity': args.palette_opacity,
            'palette_cmap': args.palette_cmap
        })

    # Multiple inputs can share a cell: take the widest spans, OR the
    # secondary-y flag, and let later inputs override title/colours/palette.
    subplotGridDefinition[inputOptions.row][inputOptions.col]['rowspan'] = max(inputOptions.rowspan, subplotGridDefinition[inputOptions.row][inputOptions.col]['rowspan'])
    subplotGridDefinition[inputOptions.row][inputOptions.col]['colspan'] = max(inputOptions.colspan, subplotGridDefinition[inputOptions.row][inputOptions.col]['colspan'])
    subplotGridDefinition[inputOptions.row][inputOptions.col]['secondary_y'] = inputOptions.y_secondary or subplotGridDefinition[inputOptions.row][inputOptions.col]['secondary_y']
    if inputOptions.title is not None:
        subplotGridDefinition[inputOptions.row][inputOptions.col]['title'] = inputOptions.title

    if len(inputOptions.subplot_colours) > 0:
        subplotGridDefinition[inputOptions.row][inputOptions.col]['colours'] = copy.copy(inputOptions.subplot_colours)
    if inputOptions.subplot_palette is not None:
        subplotGridDefinition[inputOptions.row][inputOptions.col]['palette'] = inputOptions.subplot_palette
    if inputOptions.subplot_palette_reverse is not None:
        subplotGridDefinition[inputOptions.row][inputOptions.col]['palette_reverse'] = inputOptions.subplot_palette_reverse
    if inputOptions.subplot_palette_count is not None:
        subplotGridDefinition[inputOptions.row][inputOptions.col]['palette_count'] = inputOptions.subplot_palette_count
    if inputOptions.subplot_palette_start is not None:
        subplotGridDefinition[inputOptions.row][inputOptions.col]['palette_index'] = inputOptions.subplot_palette_start
    # Per-subplot palette overrides (the opacity check was duplicated verbatim
    # in the original — the copy has been removed).
    if inputOptions.subplot_palette_opacity is not None:
        subplotGridDefinition[inputOptions.row][inputOptions.col]['palette_opacity'] = inputOptions.subplot_palette_opacity
    if inputOptions.subplot_palette_cmap is not None:
        subplotGridDefinition[inputOptions.row][inputOptions.col]['palette_cmap'] = inputOptions.subplot_palette_cmap

    # Remember where this input's traces start within the shared subplot cell,
    # then account for its traces/frames in the cell totals.
    inputOptions.subplotTraceIndex = subplotGridDefinition[inputOptions.row][inputOptions.col]['traces']
    subplotGridDefinition[inputOptions.row][inputOptions.col]['traces'] += inputOptions.traceCount
    subplotGridDefinition[inputOptions.row][inputOptions.col]['frames'] += inputOptions.frameCount

    # Dendrograms get a default palette size of 7 unless one was requested.
    if inputOptions.plot == 'dendrogram' and subplotGridDefinition[inputOptions.row][inputOptions.col]['palette_count'] is None:
        subplotGridDefinition[inputOptions.row][inputOptions.col]['palette_count'] = 7

    # Store a deep copy of the options plus the focused, NaN->None sanitised frames.
    data.append({'options': copy.deepcopy(inputOptions), 'frames': [f.where(pandas.notnull(f), None) for f in [f for i, (_, f) in enumerate(inputFrames) if i in focusedFrames]]})

    gc.collect()


# Separate python script outputs from regular plot outputs: the first
# requested .py file becomes the master script (scriptPath), any further
# .py files are secondary copies (secondaryScriptPaths).
pythonOutputs = [x for x in args.output if x.lower().endswith('.py')]
scriptPath = pythonOutputs[0] if pythonOutputs else None
secondaryScriptPaths = pythonOutputs[1:]

# Only non-script outputs remain in args.output
args.output = [x for x in args.output if not x.lower().endswith('.py')]


# If a terminal data action (print/file/pickle) already ran and no plot output
# or script was requested, there is nothing left to do. Use sys.exit instead of
# the site-module convenience exit().
if doneSomething and not args.browser and len(args.output) == 0 and scriptPath is None:
    sys.exit(0)
elif len(args.output) == 0 and scriptPath is None:
    # No output target at all: fall back to opening the plot in the browser
    args.browser = True

if totalTraceCount == 0:
    if not args.quiet:
        print('No input data available for plotting.')
    sys.exit(0)

# Plotting script will be executed in this path context
if scriptPath is not None:
    scriptContext = os.path.abspath(os.path.dirname(scriptPath))
else:
    scriptContext = os.path.abspath(os.getcwd())

# Converting paths to new relative paths to the plotting script
# In case a script is saved, those paths are still valid no matter from where its called
for i, p in enumerate(args.output):
    if not os.path.isabs(p):
        args.output[i] = os.path.relpath(p, scriptContext)


# Resolve the colour list of every defined subplot cell from its seaborn
# palette settings; optionally dump the result for debugging.
if args.theme == 'palette' and args.colour_debug:
    print('Colour Palettes:')

subplotGrid = [{k: int(v) for (k, v) in dim.items()} for dim in subplotGrid]

for r in range(1, int(subplotGrid[1]['max']) + 1):
    for c in range(1, int(subplotGrid[0]['max']) + 1):
        if r in subplotGridDefinition and c in subplotGridDefinition[r]:
            subplot = subplotGridDefinition[r][c]
            if subplot['palette_count'] is None:
                # Palette size defaults to the local subplot counts or the global
                # totals, depending on the colour cycle mode, minus any colours
                # that were given explicitly.
                if subplot['palette_local']:
                    subplot['palette_count'] = subplot['traces'] if args.per_trace_colours else subplot['frames'] if args.per_frame_colours else 1
                else:
                    subplot['palette_count'] = totalTraceCount if args.per_trace_colours else totalFrameCount if args.per_frame_colours else totalInputCount
                subplot['palette_count'] = max(0, subplot['palette_count'] - len(subplot['colours']))
            # seaborn returns float RGB triples in [0, 1]; convert to rgba() strings
            subplot['colours'].extend([f"rgba({int(255*r)}, {int(255*g)}, {int(255*b)}, {subplot['palette_opacity']})" for (r, g, b) in seaborn.color_palette(subplot['palette'], subplot['palette_count'])])

            subplot['cmap'] = None

            if subplot['palette_reverse']:
                subplot['colours'].reverse()

            if args.theme == 'palette' and args.colour_debug:
                print(f'    subplot @ [{r}, {c}, local({subplot["palette_local"]})]: ' + ' '.join(subplot['colours']))
globalPaletteIndex = args.palette_start

legendEntries = []

# Open the file that will hold the generated plotting script: a temporary file
# when no script output was requested, otherwise the requested path.
# NOTE(review): the fd returned by mkstemp is kept in plotFd and not closed
# here — presumably cleaned up further down, outside this view; confirm.
plotFd = None
if (scriptPath is None):
    args.script_only = False
    plotFd, plotScriptName = tempfile.mkstemp()
    plotScript = open(plotScriptName, 'w+')
else:
    plotScriptName = os.path.abspath(scriptPath)
    plotScript = open(plotScriptName, 'w+')

# Emit the generated script's header: shebang, imports, and an argparse setup
# whose defaults are baked in from the current args. Doubled braces ({{...}})
# survive this outer f-string and are interpolated by the GENERATED script.
# BUGFIX: the addon help used an undefined name; it must be the escaped
# {{filename}} so the generated script substitutes its own 'filename' variable.
plotScript.write(f"""#!/usr/bin/env python3
#
# Generated by {__prog__} {__version__}, available at {__url__}

import os
import sys
import shutil
import subprocess
import tempfile
import argparse
import plotly
import plotly.graph_objects as go
import plotly.io as pio
from plotly.subplots import make_subplots

# Disable MathJAX to avoid unnecessary message in pdf output
pio.kaleido.scope.mathjax = None

filepath = os.path.dirname(__file__)
filename, fileext = os.path.splitext(os.path.basename(__file__))

parser = argparse.ArgumentParser(description="plots the contained figure")
parser.add_argument("--font-size", help="font size (default %(default)s)", type=int, default={args.font_size})
parser.add_argument("--font-colour", help="font colour (default %(default)s)", default='{args.font_colour}')
parser.add_argument("--font-family", help="font family (default %(default)s)", default='{args.font_family}')
parser.add_argument("--width", help="width of output file (default %(default)s)", type=int, default={args.width})
parser.add_argument("--height", help="height of output (default %(default)s)", type=int, default={args.height})
parser.add_argument("--output", "-o", help="output file (html, png, jpeg, pdf...) (default %(default)s)", type=str, nargs="+", default={args.output})
parser.add_argument("--addon", help=f"execute specified addon files or {{filename}}_addon{{fileext}} if found (default %(default)s)", choices=["on", "off"], default="{args.addon}")
parser.add_argument("--addon-files", help=f"specify addon files to source when addon is enabled (default %(default)s)", default={args.addon_files}, nargs='+')
parser.add_argument("--browser", help="open plot in browser", action="store_true")
parser.add_argument("--quiet", "-q", help="no warnings and don't open output file", action="store_true")

args = parser.parse_args()

if len(args.output) == 0:
    args.browser = True
""")


# Titles for each defined subplot cell, collected while emitting the specs
# matrix below and passed to make_subplots at the end.
subplotTitles = []

# Emit the figure construction into the generated script. The 'palette' theme
# is rendered on top of plotly_white.
# NOTE(review): y_title/x_title and the share options are interpolated without
# quoting — presumably pre-formatted (quoted string or None) upstream; verify.
plotScript.write(f"""\n\nplotly.io.templates.default = '{"plotly_white" if args.theme == "palette" else args.theme}'

fig = make_subplots(
    cols={subplotGrid[0]['max']},
    rows={subplotGrid[1]['max']},
    shared_xaxes={args.x_share},
    shared_yaxes={args.y_share},
    y_title={args.y_master_title},
    x_title={args.x_master_title},
    vertical_spacing={args.vertical_spacing},
    horizontal_spacing={args.horizontal_spacing},
    specs=[""")
# One specs row per grid row; cells without a subplot definition become None.
for r in range(1, subplotGrid[1]['max'] + 1):
    plotScript.write("\n        [")
    for c in range(1, subplotGrid[0]['max'] + 1):
        if (r in subplotGridDefinition and c in subplotGridDefinition[r]):
            # Doubled braces emit literal dict braces into the generated script.
            plotScript.write(f"{{'rowspan': {subplotGridDefinition[r][c]['rowspan']}, 'colspan': {subplotGridDefinition[r][c]['colspan']}, 'secondary_y': {subplotGridDefinition[r][c]['secondary_y']}}}, ")
            subplotTitles.append('' if subplotGridDefinition[r][c]['title'] is None else subplotGridDefinition[r][c]['title'])
        else:
            plotScript.write("None,")
    plotScript.write("],")
plotScript.write(f"""
    ],
    subplot_titles={subplotTitles}
)""")

# Global counters across all inputs/frames/traces; used for colour rotation
# and for indexing into args.global_trace_names.
currentInputIndex = None  # NOTE(review): not referenced in this section — verify use elsewhere
frameIndex = 0
traceIndex = 0
inputIndex = 0

# Emit every input's traces into the generated script.
# NOTE: 'input' shadows the builtin; kept as-is (doc-only change).
for input in data:
    options = input['options']
    frames = input['frames']
    subplot = subplotGridDefinition[options.row][options.col]
    plotRange = []  # running axis min/max, maintained via updateRange()
    inputTraceIndex = 0
    inputFrameIndex = 0
    multiCategory = False

    # Nothing to plot for this input at all.
    if options.traceCount == 0:
      print(f"WARNING: frame {inputFrameIndex} from input files {', '.join(options.filenames)} has no traces")
      continue

    # Distribution expansion only applies to violin/box plots; reset otherwise.
    if options.plot not in ['violin', 'box'] and options.distribution_mode in ['aggregated', 'raggregated']:
      if not args.quiet:
        print(f"WARNING: distribution mode has no effect on plot type '{options.plot}'", file=sys.stderr)
      options.distribution_mode = 'normal'

    # Raw box plots let plotly derive the whiskers; only minmax/maxmin apply.
    if options.plot == 'box' and options.box_whisker not in ['minmax', 'maxmin'] and options.box_type == 'raw':
      if not args.quiet:
        print(f"WARNING: box whisker can only be chosen in precomputed box types", file=sys.stderr)
      options.box_whisker = 'minmax'

    # Validate the whisker mode for precomputed boxes:
    #   'p<X>'   -> percentile, 75 < X <= 100 (converted to a fraction)
    #   'iqr'    -> 1.5*IQR, or 'iqr<f>' with a positive factor f
    #   else     -> one of minmax/maxmin/none
    if options.plot == 'box' and options.box_type == 'precomputed':
      whiskersCorrect = False
      if options.box_whisker.startswith('p'):
        if whiskersCorrect := isFloat(options.box_whisker[1:]) and 75.0 < float(options.box_whisker[1:]) <= 100.0:
          options.box_whisker = float(options.box_whisker[1:]) / 100.0
      elif options.box_whisker.startswith('iqr'):
        whiskersCorrect = len(options.box_whisker) == 3 or (isFloat(options.box_whisker[3:]) and 0 < float(options.box_whisker[3:]))
      else:
        whiskersCorrect = options.box_whisker in ['minmax', 'maxmin', 'none']
      if not whiskersCorrect:
          raise Exception(f"ERROR: unknown box whisker mode '{options.box_whisker}'")

    # Scaled distributions are floored to integer weights further below, so
    # statistics must be precomputed on the weighted data to stay accurate.
    if options.plot == 'box' and options.distribution_mode in ['aggregated', 'raggregated'] and options.distribution_scale > 0 and not options.box_type == 'precomputed':
      if not args.quiet:
        print(f"WARNING: switching to precomputed box type for accurate statistics in distribution scaling", file=sys.stderr)
      options.box_type = 'precomputed'
    for frame in frames:
        # NaN cannot be plotted or used, cast it to None
        # Drop only columns/rows NaN values and replace NaN with None
        frame = frame.dropna(how='all', axis=0)
        frame = frame.replace({numpy.nan: None})
        frame.index = frame.index.to_series().replace({numpy.nan: None})
        frameTraceIndex = 0

        expandDistribution = None
        if options.distribution_mode in ['aggregated', 'raggregated']:
          if options.plot in ['violin', 'box']:
            if all(frame.index.map(isFloat)):
              expandDistribution = frame.index.map(float).to_list()
            elif not args.quiet:
              print(f"WARNING: cannot expand distribution for non numeric index in frame {inputFrameIndex}", file=sys.stderr)
          else:
            if not args.quiet:
              print(f"WARNING: distribution mode has no effect on plot type '{options.plot}' in frame {inputFrameIndex}", file=sys.stderr)

        _categories = None
        for specialFrameColumn in options.frameSpecialColumns:
          if specialFrameColumn not in frame.columns:
            continue
          for colIndex in range(len(frame.columns)):
            colName = str(frame.columns[colIndex])
            if (colName == options.special_column_prefix + 'category') and _categories is None:
              _categories = ['' if x is None else x for x in frame.iloc[:, colIndex].values.tolist()]

        if options.plot not in ['heatmap', 'dendrogram']:
            for colIndex, _ in enumerate(frame.columns):
                col = str(frame.columns[colIndex])
                if col in options.traceSpecialColumns or col in options.frameSpecialColumns:
                  continue

                if options.trace_colours and frameTraceIndex < len(options.trace_colours):
                  fillcolour = options.trace_colours[frameTraceIndex]
                else:
                  colourIndex = (subplot['palette_index'] if subplot['palette_local'] else globalPaletteIndex) % len(subplot['colours'])
                  fillcolour = subplot['colours'][colourIndex]
                markercolour = options.line_colour

                _errors_symmetric = True
                _errors_pos = None
                _errors_neg = None
                _bases = None
                _labels = None
                _colours = None
                _sizes = None

                for nextColIndex in range(colIndex + 1, colIndex + 1 + len(options.traceSpecialColumns) if colIndex + 1 + len(options.traceSpecialColumns) <= len(frame.columns) else len(frame.columns)):
                  nextCol = str(frame.columns[nextColIndex])
                  if (nextCol not in options.traceSpecialColumns):
                    break
                  if (nextCol == options.special_column_prefix + 'error') and (_errors_pos is None):
                    _errors_pos = [x if (x is not None) else 0 for x in frame.iloc[:, nextColIndex].values.tolist()]
                  elif (nextCol == options.special_column_prefix + 'error+') and (_errors_pos is None):
                    _errors_symmetric = False
                    _errors_pos = [x if (x is not None) else 0 for x in frame.iloc[:, nextColIndex].values.tolist()]
                  elif (nextCol == options.special_column_prefix + 'error-') and (_errors_neg is None):
                    _errors_symmetric = False
                    _errors_neg = [x if (x is not None) else 0 for x in frame.iloc[:, nextColIndex].values.tolist()]
                  elif (nextCol == options.special_column_prefix + 'offset') and (_bases is None):
                    _bases = [x if (x is not None) else 0 for x in frame.iloc[:, nextColIndex].values.tolist()]
                  elif (nextCol == options.special_column_prefix + 'label') and (_labels is None):
                    _labels = frame.iloc[:, nextColIndex].values.tolist()
                  elif (nextCol == options.special_column_prefix + 'size') and (_sizes is None):
                    _sizes = frame.iloc[:, nextColIndex].values.tolist()
                  elif (nextCol == options.special_column_prefix + 'colour') and (_colours is None) and (frameTraceIndex >= len(options.trace_colours)):
                    _colours = frame.iloc[:, nextColIndex].values.tolist()
                    _colours = [c if c is not None else fillcolour for c in _colours]

                traceName = col
                if (inputTraceIndex < len(options.trace_names)):
                    traceName = options.trace_names[inputTraceIndex]
                elif (options.use_name is not None):
                    traceName = options.use_name
                elif (traceIndex < len(args.global_trace_names)):
                    traceName = args.global_trace_names[traceIndex]

                showInLegend = options.legend_entries == 'all'

                if (options.plot == 'line'):
                  ydata = frame.iloc[:, colIndex].values.tolist() if not options.vertical else list(frame.index)
                  xdata = frame.iloc[:, colIndex].values.tolist() if options.vertical else list(frame.index)
                  updateRange(plotRange, [xdata, ydata])
                elif (options.plot == 'bar'):
                  ydata = frame.iloc[:, colIndex].tolist() if options.vertical else list(frame.index)
                  xdata = frame.iloc[:, colIndex].tolist() if not options.vertical else list(frame.index)
                  if _bases is not None:
                    rxdata = xdata
                    rydata = ydata
                    if (options.horizontal):
                      rxdata = [a + b if (a is not None and b is not None) else a if a is not None else b for a, b in zip(xdata, _bases)]
                    else:
                      rydata = [a + b if (a is not None and b is not None) else a if a is not None else b for a, b in zip(ydata, _bases)]
                    updateRange(plotRange, [rxdata, rydata])
                  else:
                    updateRange(plotRange, [xdata, ydata])
                else:  # Box and Violin
                    rawData = frame.iloc[:, colIndex].values
                    if all(x is None for x in rawData):
                      if not args.quiet:
                        print(f"WARNING: no data or only NaN found at column {colIndex} of frame '{inputFrameIndex}'", file=sys.stderr)
                      rawData[0] = 0

                    precomputeStats = options.plot == 'box' and options.box_type == 'precomputed'
                    precomputeQ1 = None
                    precomputeQ2 = None
                    precomputeQ3 = None
                    precomputeStd = None
                    precomputeMean = None
                    precomputeLF = None
                    precomputeUF = None

                    localExpand = None
                    data = [x for x in rawData if x is not None]

                    if expandDistribution is not None and options.distribution_mode in ['aggregated', 'raggregated']:
                      localExpand = [x for (x, y) in zip(expandDistribution, rawData) if y is not None]

                    if not precomputeStats and localExpand is not None:
                      if options.distribution_scale > 0:
                        maxData = max(data)
                        data = [(options.distribution_scale / maxData) * x for x in data]
                      data = numpy.floor(data).astype(numpy.int64)

                    if localExpand is not None and options.distribution_mode == 'raggregated':
                        data, localExpand = localExpand, data

                    if precomputeStats:
                      reqUfP = 0.0 if not isFloat(options.box_whisker) else options.box_whisker
                      reqLfP = 1.0 - reqUfP

                      lfP, precomputeQ1, precomputeQ2, precomputeQ3, ufP = weightedQuantiles(data, [reqLfP, 0.25, 0.5, 0.75, reqUfP], localExpand)
                      precomputeMean, precomputeStd = weightedMeanStd(data, localExpand)
                      if isFloat(options.box_whisker):
                        precomputeLF = lfP
                        precomputeUF = ufP
                      elif options.box_whisker.startswith('iqr'):
                        iqrFactor = float(options.box_whisker[3:]) if len(options.box_whisker) > 3 and isFloat(options.box_whisker[3:]) else 1.5
                        precomputeLF = precomputeQ1 - (iqrFactor * (precomputeQ3 - precomputeQ1))
                        precomputeUF = precomputeQ3 + (iqrFactor * (precomputeQ3 - precomputeQ1))
                      elif options.box_whisker in ['minmax', 'maxmin']:
                        precomputeLF = min(data) if localExpand is None else 0 if sum(localExpand) == 0 else min(x for x, w in zip(data, localExpand) if w != 0)
                        precomputeUF = max(data) if localExpand is None else 0 if sum(localExpand) == 0 else max(x for x, w in zip(data, localExpand) if w != 0)
                      else:
                        precomputeLF = None
                        precomputeUF = None

                    if not precomputeStats and localExpand is not None:
                      # Expansion is using floor to avoid artificially lifting a dsitribution
                      # this way the scaling factor corresponds to including the percentile of values
                      # distribution_scale of 1000 would be the 99.9th percentile
                      data = list(numpy.repeat(data, localExpand))

                    if precomputeStats:
                      ydata = [col] if not options.vertical else None
                      xdata = [col] if options.vertical else None
                      stats = [precomputeQ1, precomputeQ2, precomputeQ3, precomputeStd, precomputeLF, precomputeUF, precomputeMean]
                      updateRange(plotRange, [None, stats] if options.vertical else [stats, None])
                    else:
                      index = None
                      ydata = index if not options.vertical else data
                      xdata = index if options.vertical else data
                      dotMean = None if len(data) == 0 else numpy.mean(data)
                      updateRange(plotRange, [xdata, ydata])

                if options.offsetgroups == 'auto':
                    offsetgroup = options.subplotTraceIndex + inputFrameIndex + frameTraceIndex + 1 if args.bar_mode == 'group' else None
                else:
                    if inputTraceIndex < len(options.offsetgroups):
                        offsetgroup = options.offsetgroups[inputTraceIndex]
                    else:
                        offsetgroup = options.offsetgroups[-1]

                if options.category != 'none' and _categories is not None:
                  if options.horizontal and options.plot != 'line':
                    ydata = [_categories, ydata] if options.category == 'normal' else [ydata, _categories]
                  else:
                    xdata = [_categories, xdata] if options.category == 'normal' else [xdata, _categories]

                if traceName not in legendEntries:
                    if options.legend_entries == 'unique':
                        showInLegend = True
                    legendEntries.append(traceName)

                if options.plot == 'line':
                    lineMarker = options.line_markers[-1] if len(options.line_markers) <= inputTraceIndex else options.line_markers[inputTraceIndex]
                    plotScript.write(f"""
fig.add_trace(go.Scatter(
    name='{traceName}',
    legendgroup='{traceName}',
    showlegend={showInLegend},
    mode='{options.line_mode}',""")
                    if (_colours is not None):
                        plotScript.write(f"""
    marker_color={_colours},""")
                    else:
                        plotScript.write(f"""
{commentColour}    marker_color='{fillcolour}',""")
                    plotScript.write(f"""
{commentColour}    line_color='{fillcolour}',
{commentColour}    fillcolor='{fillcolour}', # Currently not supported through script, using default
    stackgroup='{'stackgroup-' + str(inputIndex) if options.line_stack else ''}',
    marker_symbol='{lineMarker}',
    marker_opacity={options.line_marker_opacity},
    marker_size={options.line_marker_size if _sizes is None else _sizes},
    marker_line_color='{options.line_colour}',
    marker_line_width={options.line_marker_width},
    fill='{options.line_fill}',
    line_dash='{options.line_dash}',
    line_shape='{options.line_shape}',
    line_width={options.line_width},
    y={ydata},
    x={xdata},""")
                    if (_labels is not None):
                        plotScript.write(f"""
    text={_labels},
    textposition='{options.line_text_position}',
    texttemplate='{options.text_template}',""")
                    if (_errors_pos is not None or _errors_neg is not None):
                        plotScript.write(f"""
    error_{'y' if options.horizontal else 'x'}=dict(
        visible={options.show_error},
        type='data',
        symmetric={_errors_symmetric},
        array={_errors_pos},
        arrayminus={_errors_neg},
    ),""")
                    plotScript.write(f"""
    opacity={options.opacity},
    cliponaxis={False if 'markers' in options.line_mode else True},
), col={options.col}, row={options.row}, secondary_y={options.y_secondary})
""")
                elif options.plot == 'bar':
                    plotScript.write(f"""
fig.add_trace(go.Bar(
    name='{traceName}',
    legendgroup='{traceName}',
    showlegend={showInLegend},
    orientation='{'v' if options.vertical else 'h'}',""")
                    if (_colours is not None):
                        plotScript.write(f"""
    marker_color={_colours},""")
                    else:
                        plotScript.write(f"""
{commentColour}    marker_color='{fillcolour}',""")
                    plotScript.write(f"""
{commentColour}    marker_line_color='{markercolour}',
    marker_line_width={options.line_width},
    width={options.bar_width},
    offset={options.bar_shift},
    offsetgroup={offsetgroup},
    y={ydata},
    x={xdata},""")
                    if (_labels is not None):
                        plotScript.write(f"""
    text={_labels},""")
                    plotScript.write(f"""
    texttemplate='{options.text_template}',
    textposition='{options.bar_text_position}',
    insidetextanchor='{options.bar_text_anchor}',
    textangle={options.bar_text_angle},""")
                    if (_bases is not None):
                        plotScript.write(f"""
    base={_bases},""")
                    if (_errors_pos is not None or _errors_neg is not None):
                        plotScript.write(f"""
    error_{'x' if options.horizontal else 'y'}=dict(
        visible={options.show_error},
        type='data',
        symmetric={_errors_symmetric},
        array={_errors_pos},
        arrayminus={_errors_neg},
    ),""")
                    plotScript.write(f"""
    opacity={options.opacity},
), col={options.col}, row={options.row}, secondary_y={options.y_secondary})
""")
                elif options.plot == 'box':
                    markercolour = options.line_colour
                    plotScript.write(f"""
fig.add_trace(go.Box(
    name='{traceName}',
    legendgroup='{traceName}',
    showlegend={showInLegend},
    y={ydata},
    x={xdata},
    q1={None if precomputeQ1 is None else [precomputeQ1]},
    median={None if precomputeQ2 is None else [precomputeQ2]},
    q3={None if precomputeQ3 is None else [precomputeQ3]},
    mean={None if precomputeMean is None else [precomputeMean]},
    sd={None if precomputeStd is None else [precomputeStd]},
    upperfence={None if precomputeUF is None else [precomputeUF]},
    lowerfence={None if precomputeLF is None else [precomputeLF]},
    boxpoints=False,
    boxmean={True if options.box_mean == 'line' else False},
    width={options.box_width},
{commentColour}    fillcolor='{fillcolour}',
{commentColour}    line_color='{markercolour}',
{commentColour}    marker_color='{markercolour}',
    line_width={options.line_width},
    orientation='{'v' if options.vertical else 'h'}',
    opacity={options.opacity},
), col={options.col}, row={options.row}, secondary_y={options.y_secondary})
""")
                    if options.box_mean == 'dot':
                        plotScript.write(f"""
fig.add_trace(go.Scatter(
    name='mean_{traceName}',
    legendgroup='{traceName}',
    showlegend=False,
    y={[traceName] if not options.vertical else [precomputeMean] if precomputeMean is not None else [dotMean]},
    x={[traceName] if options.vertical else [precomputeMean] if precomputeMean is not None else [dotMean]},
{commentColour}    fillcolor='{fillcolour}',
{commentColour}    line_color='{markercolour}',
    line_width={options.line_width},
    opacity={options.opacity},
), col={options.col}, row={options.row}, secondary_y={options.y_secondary})
""")
                elif options.plot == 'violin':
                    if options.violin_mode == 'halfhalf':
                        side = 'negative' if inputTraceIndex % 2 == 0 else 'positive'
                    elif options.violin_mode.endswith('half') or options.violin_mode.endswith('halfgroup'):
                        side = 'negative' if options.violin_mode.startswith('neg') else 'positive'
                    else:
                        side = 'both'
                    markercolour = options.line_colour
                    plotScript.write(f"""
fig.add_trace(go.Violin(
    name='{traceName}',
    legendgroup='{traceName}',
    showlegend={showInLegend},
    scalegroup='trace{inputTraceIndex}',
    y={ydata},
    x={xdata},
{commentColour}    fillcolor='{fillcolour}',
{commentColour}    line_color='{options.line_colour}',
{commentColour}    marker_color='{markercolour}',
    line_width={options.line_width},
    side='{side}',
    width={options.violin_width},
    scalemode='width',
    points={False if options.violin_points == 'none' else options.violin_points},
    jitter={options.violin_jitter},
    orientation='{'v' if options.vertical else 'h'}',
    meanline_visible={True if options.violin_mean == 'line' else False},
    box_visible={True if options.violin_mean == 'box' else False},
    opacity={options.opacity},
), col={options.col}, row={options.row}, secondary_y={options.y_secondary})
""")

                traceIndex += 1
                frameTraceIndex += 1
                inputTraceIndex += 1
                if subplot['palette_local']:
                    subplot['palette_index'] += 1 if args.per_trace_colours else 0
                else:
                    globalPaletteIndex += 1 if args.per_trace_colours else 0
            inputFrameIndex += 1
            frameIndex += 1
            if subplot['palette_local']:
                subplot['palette_index'] += 1 if args.per_frame_colours else 0
            else:
                globalPaletteIndex += 1 if args.per_frame_colours else 0

        else:
            # Generate continuous colour scale as we will need it here
            if options.plot == 'heatmap':
              if subplot['cmap'] is None:
                try:
                    seaborn_cmap = seaborn.color_palette(subplot['palette'], as_cmap=True)
                    if subplot['palette_cmap'] in ['log', 'rlog']:
                        cmap_range = numpy.logspace(numpy.log10(0.1), numpy.log10(1.1), 255) - 0.1
                        if subplot['palette_cmap'] == 'rlog':
                            cmap_range = numpy.flip(1 - cmap_range)
                    else:
                        cmap_range = numpy.arange(0.0, 1.0 + (1.0 / 255), 1.0 / 255)
                    if subplot['palette_reverse']:
                        cmap_range = numpy.flip(cmap_range)
                    subplot['cmap'] = [f'rgb({r}, {g}, {b})' for (r, g, b) in [map(numpy.uint8, 255 * numpy.array(seaborn_cmap(i)[:3])) for i in cmap_range]]
                except Exception:
                    raise 'ERROR: could not generated continuous colour scale, is the chosen seaborn palette compatible?'

              data = frame.values.T.tolist() if options.vertical else frame.values.tolist()
              yaxis = list(frame.columns) if options.vertical else list(frame.index)
              xaxis = list(frame.columns) if not options.vertical else list(frame.index)
              updateRange(plotRange, [xaxis, yaxis])

              plotScript.write(f"""
fig.add_trace(go.Heatmap(
    y={yaxis},
    x={xaxis},
    z={data},
    zauto={True if options.z_range_from == 'auto' and options.z_range_to == 'auto' else False},
    zmin={None if options.z_range_from == 'auto' else options.z_range_from},
    zmax={None if options.z_range_to == 'auto' else options.z_range_to},
{commentColour}    colorscale={subplot['cmap']},
    colorbar_orientation='{options.colour_scale_orientation}',
    showscale={options.colour_scale != 'hide'},
    reversescale={options.colour_scale == 'reverse'},
), col={options.col}, row={options.row}, secondary_y={options.y_secondary})
              """)
            elif options.plot == 'dendrogram':
              data = frame.values.tolist()
              labels = list(frame.index)


              # Warn when a linkage method that assumes Euclidean geometry is
              # combined with a different distance metric.
              # FIX: the comparison previously used the misspelled
              # 'euclidiean', so the warning fired even when the user chose
              # the correct scipy metric name 'euclidean'.
              if options.dendrogram_linkage in ['centroid', 'median', 'ward'] and options.dendrogram_distance != 'euclidean' and not args.quiet:
                print(f"WARNING: linkage method {options.dendrogram_linkage} is only correctly defined for the euclidian distance!")

              dendrogram = plotly.figure_factory.create_dendrogram(
                numpy.array(data),
                labels=labels,
                orientation='bottom' if options.vertical else 'left' if not options.y_secondary else 'right',
                colorscale=subplot['colours'] if args.theme == 'palette' else None,
                distfun=lambda x: scipy.spatial.distance.pdist(x, options.dendrogram_distance),
                linkagefun=lambda x: scipy.cluster.hierarchy.linkage(x, options.dendrogram_linkage),
                color_threshold=options.dendrogram_colour_threshold
              )

              if options.vertical:
                options.x_show_zero_line = False
                options.y_show_zero_line = False
                options.x_tickmode = dendrogram['layout']['xaxis']['tickmode']
                options.x_ticktext = list(dendrogram['layout']['xaxis']['ticktext'])
                options.x_tickvals = list(dendrogram['layout']['xaxis']['tickvals'])
              else:
                options.x_show_zero_line = False
                options.y_show_zero_line = False
                options.y_tickmode = dendrogram['layout']['yaxis']['tickmode']
                options.y_ticktext = list(dendrogram['layout']['yaxis']['ticktext'])
                options.y_tickvals = list(dendrogram['layout']['yaxis']['tickvals'])

              for dendrogramTrace in dendrogram['data']:
                updateRange(plotRange, [list(dendrogramTrace['x']), list(dendrogramTrace['y'])] if options.horizontal else [list(dendrogramTrace['y']), list(dendrogramTrace['x'])])
                dendrogramTrace['line_dash'] = options.line_dash
                dendrogramTrace['line_width'] = options.line_width
                dendrogramTrace['opacity'] = options.opacity
                sDendrogramTrace = f"{dendrogramTrace}".replace('Scatter', 'go.Scatter').replace('array([','[').replace('])',']')
                plotScript.write(f"""
fig.add_trace({sDendrogramTrace}, col={options.col}, row={options.row}, secondary_y={options.y_secondary})
              """)

              if options.vertical:
                correction = (plotRange[1]['max'] - plotRange[1]['min']) * 0.025
                plotRange[1]['min'] -= correction
                plotRange[1]['max'] += correction
              else:
                correction = (plotRange[0]['max'] - plotRange[0]['min']) * 0.025
                plotRange[0]['min'] -= correction
                plotRange[0]['max'] += correction

            else:
              raise f'ERROR: unknown plot type {options.plot}'



        inputIndex += 1
        if subplot['palette_local']:
            subplot['palette_index'] += 1 if args.per_input_colours else 0
        else:
            globalPaletteIndex += 1 if args.per_input_colours else 0

    # Find out if we need left, right and bottom margin:
    # left: a primary y title in the first column; top: any title in row 1;
    # right: a secondary-y title in the last column; bottom: an x title in the
    # last row. Each default is latched True once and never re-evaluated.
    if defaultLeftMargin is None and options.col == 1 and options.y_title and not options.y_secondary:
        defaultLeftMargin = True
    if defaultTopMargin is None and options.row == 1 and options.title:
        # default top margin is 100 which is a bit much for just having subplot titles:
        defaultTopMargin = True
        if args.margin_t is None and args.margins is None:
            args.margin_t = 40
    # NOTE(review): the left-margin check tests truthiness of y_title while the
    # right/bottom checks test 'is not None' — confirm the asymmetry is intended.
    if defaultRightMargin is None and options.col + options.colspan - 1 == subplotGrid[0]['max'] and options.y_title is not None and options.y_secondary:
        defaultRightMargin = True
    if defaultBottomMargin is None and options.row + options.rowspan - 1 == subplotGrid[1]['max'] and options.x_title is not None:
        defaultBottomMargin = True

    # If line width was not explicitly set, set the axis line width for the multi category axis to one
    # NOTE(review): multiCategory is initialised False above and not visibly set
    # True in this section — verify it is still assigned while emitting traces.
    if multiCategory and options.horizontal and not options.y_line_width_forced:
        options.y_line_width = 1
    if multiCategory and options.vertical and not options.x_line_width_forced:
        options.x_line_width = 1
    plotScript.write("\n\n")
    plotScript.write("# Subplot specific options:\n")
    plotScript.write(f"""fig.update_yaxes(
    {'# ' if options.y_show is None else ''}visible={options.y_show},
    {'# ' if options.y_show_tick_labels is None else ''}showticklabels={options.y_show_tick_labels},
    {'# ' if options.y_show_grid is None else ''}showgrid={options.y_show_grid},
    {'# ' if options.y_show_zero_line is None else ''}zeroline={options.y_show_zero_line},
    {'# ' if options.y_mirror is None else ''}mirror={options.y_mirror},
    type='{options.y_type}',
    rangemode='{options.y_range_mode}',
    autorange={options.y_autorange},
    automargin={True if options.y_title_standoff is None else False},
    {'# ' if options.y_title is None else ''}title_text='{options.y_title}',
    title_standoff={options.y_title_standoff},
    tickcolor={options.y_colour},
    tickformat='{options.y_tick_format}',
    ticksuffix='{options.y_tick_suffix}',
    tickprefix='{options.y_tick_prefix}',
    tickmode='{options.y_tickmode}',
    ticks='{options.y_ticks}',
    nticks={options.y_nticks},
    {'# ' if options.y_tick0 is None else ''}tick0={options.y_tick0},
    {'# ' if options.y_dtick is None else ''}dtick={options.y_dtick},
    tickvals={options.y_tickvals},
    ticktext={options.y_ticktext},
    tickangle={options.y_tickangle},
    showline={options.y_line_width > 0},
    linewidth={options.y_line_width},
    linecolor={options.y_colour},
    gridcolor={options.y_grid_colour},
    showdividers={options.y_line_width > 0},
    dividercolor={options.y_colour},
    dividerwidth={options.y_line_width},
    row={options.row}, col={options.col}, secondary_y={options.y_secondary}
)\n""")
    plotScript.write(f"""fig.update_xaxes(
    {'# ' if options.x_show is None else ''}visible={options.x_show},
    {'# ' if options.x_show_tick_labels is None else ''}showticklabels={options.x_show_tick_labels},
    {'# ' if options.x_show_grid is None else ''}showgrid={options.x_show_grid},
    {'# ' if options.x_show_zero_line is None else ''}zeroline={options.x_show_zero_line},
    {'# ' if options.x_mirror is None else ''}mirror={options.x_mirror},
    type='{options.x_type}',
    rangemode='{options.x_range_mode}',
    autorange={options.x_autorange},
    automargin={True if options.x_title_standoff is None else False},
    {'# ' if options.x_title is None else ''}title_text='{options.x_title}',
    title_standoff={options.x_title_standoff},
    tickcolor={options.x_colour},
    tickformat='{options.x_tick_format}',
    ticksuffix='{options.x_tick_suffix}',
    tickprefix='{options.x_tick_prefix}',
    tickmode='{options.x_tickmode}',
    ticks='{options.x_ticks}',
    nticks={options.x_nticks},
    {'# ' if options.x_tick0 is None else ''}tick0={options.x_tick0},
    {'# ' if options.x_dtick is None else ''}dtick={options.x_dtick},
    tickvals={options.x_tickvals},
    ticktext={options.x_ticktext},
    tickangle={options.x_tickangle},
    showline={options.x_line_width > 0},
    linewidth={options.x_line_width},
    linecolor={options.x_colour},
    gridcolor={options.x_grid_colour},
    showdividers={options.x_line_width > 0},
    dividercolor={options.x_colour},
    dividerwidth={options.x_line_width},
    row={options.row}, col={options.col}
)\n""")
    if options.y_range_from is not None or options.y_range_to is not None:
        options.y_range_from = options.y_range_from if options.y_range_from is not None else plotRange[1]['min']
        options.y_range_to = options.y_range_to if options.y_range_to is not None else plotRange[1]['max']
        plotScript.write(f"fig.update_yaxes(range=[{options.y_range_from}, {options.y_range_to}], col={options.col}, row={options.row}, secondary_y={options.y_secondary})\n")
    else:
      if options.y_autorange == 'True':
        plotScript.write('# ')
      plotScript.write(f"fig.update_yaxes(range=[{plotRange[0]['min']}, {plotRange[0]['max']}], col={options.col}, row={options.row}, secondary_y={options.y_secondary})\n")

    if options.x_range_from is not None or options.x_range_to is not None:
        options.x_range_from = options.x_range_from if options.x_range_from is not None else plotRange[0]['min']
        options.x_range_to = options.x_range_to if options.x_range_to is not None else plotRange[0]['max']
        plotScript.write(f"fig.update_xaxes(range=[{options.x_range_from}, {options.x_range_to}], col={options.col}, row={options.row})\n")
    else:
      if options.x_autorange == 'True':
        plotScript.write('# ')
      plotScript.write(f"fig.update_xaxes(range=[{plotRange[1]['min']}, {plotRange[1]['max']}], col={options.col}, row={options.row})\n")
    plotScript.write(f"fig.update_coloraxes(colorbar_orientation='{options.colour_scale_orientation}', showscale={options.colour_scale == 'show'}, col={options.col}, row={options.row})\n")
    plotScript.write("\n")

# Collapse the internal "half" violin variants onto plotly's native modes:
# a trailing 'halfgroup' maps to 'group', a plain trailing 'half' to
# 'overlay'. 'halfgroup' is tested first, matching the original if/elif
# order ('halfgroup' does not end in 'half', so only one rule can fire).
for halfSuffix, nativeMode in (('halfgroup', 'group'), ('half', 'overlay')):
    if violinMode.endswith(halfSuffix):
        violinMode = nativeMode
        break

# Emit the figure-wide settings (trace modes and gaps, legend placement,
# background colours) for the generated script as one buffered write.
# Every emitted line is byte-identical to what the previous per-line
# write calls produced (including the historical 'paramters' typo in the
# generated comment).
globalLayoutLines = [
    '# Global modes and paramters:\n',
    f"fig.update_layout(title={args.master_title})\n",
    f"fig.update_layout(barmode='{args.bar_mode}', boxmode='{args.box_mode}', violinmode='{violinMode}')\n",
    f"fig.update_layout(bargap={args.bar_gap}, bargroupgap={args.bar_group_gap}, boxgap={args.box_gap}, boxgroupgap={args.box_group_gap}, violingap={args.violin_gap}, violingroupgap={args.violin_group_gap})\n",
    "\n# Layout Legend\n",
    f"fig.update_layout(showlegend={args.legend_show}, legend_traceorder='{args.legend_order}', legend_tracegroupgap={args.legend_groupgap})\n",
    # Anchor overrides are emitted commented-out when the user gave none,
    # so the generated script documents the 'auto' default.
    f"{'# ' if args.legend_y_anchor is None else ''}fig.update_layout(legend_yanchor='{'auto' if args.legend_y_anchor is None else args.legend_y_anchor}')\n",
    f"{'# ' if args.legend_x_anchor is None else ''}fig.update_layout(legend_xanchor='{'auto' if args.legend_x_anchor is None else args.legend_x_anchor}')\n",
    f"fig.update_layout(legend=dict(x={args.legend_x}, y={args.legend_y}, orientation='{'v' if args.legend_vertical else 'h'}', bgcolor='rgba(255,255,255,0)'))\n",
    "\n# Layout Plot and Background\n",
    f"{commentBackgroundColour}fig.update_layout(paper_bgcolor='{args.background_colour}', plot_bgcolor='{args.background_colour}')\n",
]
plotScript.write(''.join(globalLayoutLines))

# If no explicit top margin is set, the browser view will receive a 20px
# top margin so the plotly mode bar does not overlap the plot. This must
# be computed from args.margin_t BEFORE the defaults below overwrite it.
browserTopMargin = args.margin_t if args.margin_t is not None else args.margins if args.margins is not None else 20
# Resolve each margin argument: an explicit per-side value wins, then the
# catch-all --margins value, then plotly's automatic margin (left as None)
# when a default margin was requested for that side, and zero otherwise.
for marginAttr, wantsDefaultMargin in (
    ('margin_b', defaultBottomMargin),
    ('margin_l', defaultLeftMargin),
    ('margin_r', defaultRightMargin),
    ('margin_t', defaultTopMargin),
    ('margin_pad', defaultPadMargin),
):
    if getattr(args, marginAttr) is None:
        if args.margins is not None:
            setattr(args, marginAttr, args.margins)
        elif not wantsDefaultMargin:
            setattr(args, marginAttr, 0)

plotScript.write(f"fig.update_layout(margin=dict(t={args.margin_t}, l={args.margin_l}, r={args.margin_r}, b={args.margin_b}, pad={args.margin_pad}))\n")

plotScript.write("\n# Plot Font\n")
# Font settings for the generated script. family/size are literal code
# that the generated script evaluates against its own args; only the
# colour line is optionally disabled here via the commentColour prefix.
fontBlock = (
    "fig.update_layout(font=dict(\n"
    "    family=args.font_family,\n"
    "    size=args.font_size,\n"
    f"{commentColour}    color=args.font_colour\n"
    "))\n"
)
plotScript.write(fontBlock)

# Emit the generated script's epilogue verbatim: optional addon-file
# execution (exec of user-supplied local Python files — deliberate,
# the addon is a trusted user file), writing each requested output
# (HTML via write_html, anything else via write_image), and opening
# the results with the platform opener unless --quiet was given.
# NOTE(review): the f'(unknown)_addon{fileext}' fallback filename looks
# like it was meant to derive from this script's base name — confirm
# against the variables written into the generated script earlier.
plotScript.write("""
if args.addon == 'on':
  # Execute addon file if found
  if args.addon_files is not None:
    for addonFile in args.addon_files:
      if os.path.exists(addonFile):
        exec(open(addonFile).read())
  elif os.path.exists(os.path.join(filepath,f'(unknown)_addon{fileext}')):
        exec(open(os.path.join(filepath,f'(unknown)_addon{fileext}')).read())

if len(args.output) > 0:
    for output in args.output:
        outputFormat = output.lower().split('.')[-1]
        if outputFormat == 'html':
            fig.write_html(output)
        else:
            fig.write_image(output, width=args.width, height=args.height)

        if not args.quiet:
            print(f'Saved to {output}')
            try:
                if sys.platform == "win32":
                    os.startfile(output)
                else:
                    opener = "open" if sys.platform == "darwin" else "xdg-open"
                    subprocess.call([opener, output], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
            except Exception:
                print(f'Could not open {output}!')

if args.browser:
    """)
# The browser view may need the larger top margin computed above so the
# plotly mode bar does not overlap; only emit the override when it
# differs from the final margin_t.
if browserTopMargin != args.margin_t:
  plotScript.write(f"fig.update_layout(margin_t={browserTopMargin})")
# Close the `if args.browser:` block of the generated script with the
# interactive fig.show() call (SVG snapshot button, drawing tools).
plotScript.write("""
    fig.show(config={
       'toImageButtonOptions': { 'height': None, 'width': None, 'format': 'svg' },
       'displaylogo': False, 'modeBarButtonsToAdd': ['drawline', 'drawopenpath', 'drawclosedpath', 'drawcircle', 'drawrect', 'eraseshape'],
    })
""")

# Finalize the generated plot script and, when outputs or an interactive
# browser view were requested, execute it as a child process.
plotScript.close()
if args.browser or len(args.output) > 0:
    # Run the generated script with the interpreter running this script
    # (sys.executable) instead of whatever 'python3' happens to resolve to
    # on PATH — this keeps virtualenv/installed-package context and works
    # on platforms (e.g. Windows) without a 'python3' alias. Fall back to
    # 'python3' for the rare embedded case where sys.executable is empty.
    cmdLine = [sys.executable or 'python3', plotScriptName]
    if args.browser:
        cmdLine.append('--browser')
    if args.quiet:
        cmdLine.append('--quiet')
    subprocess.check_call(cmdLine, cwd=scriptContext)

if scriptPath is None:
    # Temporary script (no --script requested): release the descriptor and
    # delete the file.
    # NOTE(review): if plotScript was opened via os.fdopen(plotFd), the fd
    # is already closed by plotScript.close() above and this would raise —
    # confirm plotFd is a distinct descriptor.
    os.close(plotFd)
    os.remove(plotScriptName)
else:
    # Persistent script: replicate it to any secondary destinations and
    # report where it landed.
    for s in secondaryScriptPaths:
        shutil.copy(scriptPath, s)
    if not args.quiet:
        for s in [scriptPath] + secondaryScriptPaths:
            print(f"Script saved to {s}")
