"""
Evaluate model against reference 

Example: ost compare-structures -m model.pdb -r reference.cif

Loads the structures and performs basic cleanup:

 * Assign elements according to the PDB Chemical Component Dictionary
 * Map nonstandard residues to their parent residues as defined by the PDB
   Chemical Component Dictionary, e.g. phospho-serine => serine
 * Remove hydrogens
 * Remove OXT atoms
 * Remove unknown atoms, i.e. atoms that are not expected according to the PDB
   Chemical Component Dictionary
 * Select for peptide/nucleotide residues

The cleaned structures are optionally dumped using -d/--dump-structures

Output is written in JSON format (default: out.json). In case of no additional
options, this is a dictionary with 8 keys describing model/reference comparison:

 * "reference_chains": Chain names of reference
 * "model_chains": Chain names of model
 * "chem_groups": Groups of polypeptides/polynucleotides from reference that
   are considered chemically equivalent. You can derive stoichiometry from this.
   Contains only chains that are considered in chain mapping, i.e. pass a
   size threshold (defaults: 10 for peptides, 4 for nucleotides).
 * "chem_mapping": List of same length as "chem_groups". Assigns model chains to
   the respective chem group. Again, only contains chains that are considered
   in chain mapping.
 * "chain_mapping": A dictionary with reference chain names as keys and the
   mapped model chain names as values. Missing chains are either not mapped
   (but present in "chem_groups", "chem_mapping") or were not considered in
   chain mapping (short peptides etc.)
 * "aln": Pairwise sequence alignment for each pair of mapped chains in fasta
   format.
 * "inconsistent_residues": List of strings that represent name mismatches of
   aligned residues in form
   <trg_cname>.<trg_rnum>.<trg_ins_code>-<mdl_cname>.<mdl_rnum>.<mdl_ins_code>.
   Inconsistencies may lead to corrupt results but do not abort the program.
   Program abortion in these cases can be enforced with
   -ec/--enforce-consistency.
 * "status": SUCCESS if everything ran through. In case of failure, the only
   content of the JSON output will be \"status\" set to FAILURE and an
   additional key: "traceback".

The following additional keys store relevant input parameters to reproduce
results:

 * "model"
 * "reference"
 * "fault_tolerant"
 * "model_biounit"
 * "reference_biounit"
 * "residue_number_alignment"
 * "enforce_consistency"
 * "cad_exec"
 * "usalign_exec"
 * "lddt_no_stereochecks"

The pairwise sequence alignments are computed with Needleman-Wunsch using
BLOSUM62 (NUC44 for nucleotides). Many benchmarking scenarios preprocess the
structures to ensure matching residue numbers (CASP/CAMEO). In these cases,
enabling -rna/--residue-number-alignment is recommended.

Each score is opt-in and can be enabled with optional arguments.

Example to compute global and per-residue lDDT values as well as QS-score:

ost compare-structures -m model.pdb -r reference.cif --lddt --local-lddt \
--qs-score

Example to inject custom chain mapping

ost compare-structures -m model.pdb -r reference.cif -c A:B B:A
"""

import argparse
import json
import os
import sys
import time
import traceback

import ost
from ost import io
from ost.mol.alg import scoring

def _ParseArgs():
    """Parse command line arguments of the compare-structures action.

    :returns: :class:`argparse.Namespace` with all arguments described in the
              respective help strings.
    """
    # module docstring serves as extended help text; RawDescriptionHelpFormatter
    # keeps its manual line breaks intact
    parser = argparse.ArgumentParser(description = __doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter,
                                     prog = "ost compare-structures")

    # input/output files and formats

    parser.add_argument(
        "-m",
        "--model",
        dest="model",
        required=True,
        help=("Path to model file."))

    parser.add_argument(
        "-r",
        "--reference",
        dest="reference",
        required=True,
        help=("Path to reference file."))

    parser.add_argument(
        "-o",
        "--output",
        dest="output",
        required=False,
        default="out.json",
        help=("Output file name. The output will be saved as a JSON file. "
              "default: out.json"))

    parser.add_argument(
        "-mf",
        "--model-format",
        dest="model_format",
        required=False,
        default=None,
        choices=["pdb", "cif", "mmcif"],
        help=("Format of model file. pdb reads pdb but also pdb.gz, same "
              "applies to cif/mmcif. Inferred from filepath if not given."))

    parser.add_argument(
        "-rf",
        "--reference-format",
        dest="reference_format",
        required=False,
        default=None,
        choices=["pdb", "cif", "mmcif"],
        help=("Format of reference file. pdb reads pdb but also pdb.gz, same "
              "applies to cif/mmcif. Inferred from filepath if not given."))

    parser.add_argument(
        "-mb",
        "--model-biounit",
        dest="model_biounit",
        required=False,
        default=None,
        type=int,
        help=("Only has an effect if model is in mmcif format. By default, "
              "the asymmetric unit (AU) is used for scoring. If there are "
              "biounits defined in the mmcif file, you can specify the "
              "(0-based) index of the one which should be used."))

    parser.add_argument(
        "-rb",
        "--reference-biounit",
        dest="reference_biounit",
        required=False,
        default=None,
        type=int,
        help=("Only has an effect if reference is in mmcif format. By default, "
              "the asymmetric unit (AU) is used for scoring. If there are "
              "biounits defined in the mmcif file, you can specify the "
              "(0-based) index of the one which should be used."))

    # processing options

    parser.add_argument(
        "-rna",
        "--residue-number-alignment",
        dest="residue_number_alignment",
        default=False,
        action="store_true",
        help=("Make alignment based on residue number instead of using "
              "a global BLOSUM62-based alignment (NUC44 for nucleotides)."))

    parser.add_argument(
        "-ec",
        "--enforce-consistency",
        dest="enforce_consistency",
        default=False,
        action="store_true",
        help=("Enforce consistency. By default residue name discrepancies "
              "between a model and reference are reported but the program "
              "proceeds. If this flag is ON, the program fails for these "
              "cases."))

    parser.add_argument(
        "-d",
        "--dump-structures",
        dest="dump_structures",
        default=False,
        action="store_true",
        help=("Dump cleaned structures used to calculate all the scores as "
              "PDB files using specified suffix. Files will be dumped to the "
              "same location as original files."))

    parser.add_argument(
        "-ds",
        "--dump-suffix",
        dest="dump_suffix",
        default=".compare.structures.pdb",
        help=("Use this suffix to dump structures.\n"
              "Defaults to .compare.structures.pdb."))

    parser.add_argument(
        "-ft",
        "--fault-tolerant",
        dest="fault_tolerant",
        default=False,
        action="store_true",
        help=("Fault tolerant parsing."))

    parser.add_argument(
        "-c",
        "--chain-mapping",
        nargs="+",
        dest="chain_mapping",
        help=("Custom mapping of chains between the reference and the model. "
              "Each separate mapping consist of key:value pairs where key "
              "is the chain name in reference and value is the chain name in "
              "model."))

    # opt-in scores

    parser.add_argument(
        "--lddt",
        dest="lddt",
        default=False,
        action="store_true",
        help=("Compute global lDDT score with default parameterization and "
              "store as key \"lddt\". Stereochemical irregularities affecting "
              "lDDT are reported as keys \"model_clashes\", "
              "\"model_bad_bonds\", \"model_bad_angles\" and the respective "
              "reference counterparts."))

    parser.add_argument(
        "--local-lddt",
        dest="local_lddt",
        default=False,
        action="store_true",
        help=("Compute per-residue lDDT scores with default parameterization "
              "and store as key \"local_lddt\". Score for each residue is "
              "accessible by key <chain_name>.<resnum>.<resnum_inscode>. "
              "Residue with number 42 in chain X can be extracted with: "
              "data[\"local_lddt\"][\"X.42.\"]. If there is an insertion "
              "code, lets say A, the residue key becomes \"X.42.A\". "
              "Stereochemical irregularities affecting lDDT are reported as "
              "keys \"model_clashes\", \"model_bad_bonds\", "
              "\"model_bad_angles\" and the respective reference "
              "counterparts. Atoms specified in there follow the following "
              "format: <chain_name>.<resnum>.<resnum_inscode>.<atom_name>"))

    parser.add_argument(
        "--bb-lddt",
        dest="bb_lddt",
        default=False,
        action="store_true",
        help=("Compute global lDDT score with default parameterization and "
              "store as key \"bb_lddt\". lDDT in this case is only computed on "
              "backbone atoms: CA for peptides and C3' for nucleotides"))

    parser.add_argument(
        "--bb-local-lddt",
        dest="bb_local_lddt",
        default=False,
        action="store_true",
        help=("Compute per-residue lDDT scores with default parameterization "
              "and store as key \"bb_local_lddt\". lDDT in this case is only "
              "computed on backbone atoms: CA for peptides and C3' for "
              "nucleotides. Per-residue scores are accessible as described for "
              "local_lddt."))

    parser.add_argument(
        "--cad-score",
        dest="cad_score",
        default=False,
        action="store_true",
        help=("Compute global CAD's atom-atom (AA) score and store as key "
              "\"cad_score\". --residue-number-alignment must be enabled "
              "to compute this score. Requires voronota_cadscore executable "
              "in PATH. Alternatively you can set cad-exec."))

    parser.add_argument(
        "--local-cad-score",
        dest="local_cad_score",
        default=False,
        action="store_true",
        help=("Compute local CAD's atom-atom (AA) scores and store as key "
              "\"local_cad_score\". Per-residue scores are accessible as "
              "described for local_lddt. --residue-number-alignments must be "
              "enabled to compute this score. Requires voronota_cadscore "
              "executable in PATH. Alternatively you can set cad-exec."))

    parser.add_argument(
        "--cad-exec",
        dest="cad_exec",
        default=None,
        help=("Path to voronota-cadscore executable (installed from "
              "https://github.com/kliment-olechnovic/voronota). Searches PATH "
              "if not set."))

    parser.add_argument(
        "--usalign-exec",
        dest="usalign_exec",
        default=None,
        help=("Path to USalign executable to compute TM-score. If not given, "
              "an OpenStructure internal copy of USalign code is used."))

    parser.add_argument(
        "--qs-score",
        dest="qs_score",
        default=False,
        action="store_true",
        help=("Compute QS-score, stored as key \"qs_global\", and the QS-best "
              "variant, stored as key \"qs_best\"."))

    parser.add_argument(
        "--rigid-scores",
        dest="rigid_scores",
        default=False,
        action="store_true",
        help=("Computes rigid superposition based scores. They're based on a "
              "Kabsch superposition of all mapped CA positions (C3' for "
              "nucleotides). Makes the following keys available: "
              "\"oligo_gdtts\": GDT with distance thresholds [1.0, 2.0, 4.0, "
              "8.0] given these positions and transformation, \"oligo_gdtha\": "
              "same with thresholds [0.5, 1.0, 2.0, 4.0], \"rmsd\": RMSD given "
              "these positions and transformation, \"transform\": the used 4x4 "
              "transformation matrix that superposes model onto reference."))

    parser.add_argument(
        "--interface-scores",
        dest="interface_scores",
        default=False,
        action="store_true",
        help=("Per interface scores for each interface that has at least one "
              "contact in the reference, i.e. at least one pair of heavy atoms "
              "within 5A. The respective interfaces are available from key "
              "\"interfaces\" which is a list of tuples in form (ref_ch1, "
              "ref_ch2, mdl_ch1, mdl_ch2). Per-interface scores are available "
              "as lists referring to these interfaces and have the following "
              "keys: \"nnat\" (number of contacts in reference), \"nmdl\" "
              "(number of contacts in model), \"fnat\" (fraction of reference "
              "contacts which are also there in model), \"fnonnat\" (fraction "
              "of model contacts which are not there in target), \"irmsd\" "
              "(interface RMSD), \"lrmsd\" (ligand RMSD), \"dockq_scores\" "
              "(per-interface score computed from \"fnat\", \"irmsd\" and "
              "\"lrmsd\"), \"interface_qs_global\" and \"interface_qs_best\" "
              "(per-interface versions of the two QS-score variants). The "
              "DockQ score is strictly designed to score each interface "
              "individually. We also provide two averaged versions to get one "
              "full model score: \"dockq_ave\", \"dockq_wave\". The first is "
              "simply the average of \"dockq_scores\", the latter is a "
              "weighted average with weights derived from \"nnat\". These two "
              "scores only consider interfaces that are present in both, the "
              "model and the reference. \"dockq_ave_full\" and "
              "\"dockq_wave_full\" add zeros in the average computation for "
              "each interface that is only present in the reference but not in "
              "the model."))

    parser.add_argument(
        "--patch-scores",
        dest="patch_scores",
        default=False,
        action="store_true",
        help=("Local interface quality score used in CASP15. Scores each "
              "model residue that is considered in the interface (CB pos "
              "within 8A of any CB pos from another chain (CA for GLY)). The "
              "local neighborhood gets represented by \"interface patches\" "
              "which are scored with QS-score and DockQ. Scores where not "
              "the full patches are represented by the reference are set to "
              "None. Model interface residues are available as key "
              "\"model_interface_residues\", reference interface residues as "
              "key \"reference_interface_residues\". Residues are represented "
              "as string in form <chain_name>.<resnum>.<resnum_inscode>. "
              "The respective scores are available as keys \"patch_qs\" and "
              "\"patch_dockq\""))

    parser.add_argument(
        "--tm-score",
        dest="tm_score",
        default=False,
        action="store_true",
        help=("Computes TM-score with the USalign tool. Also computes a "
              "chain mapping in case of complexes that is stored in the "
              "same format as the default mapping. TM-score and the mapping "
              "are available as keys \"tm_score\" and \"usalign_mapping\""))

    parser.add_argument(
        "--lddt-no-stereochecks",
        dest="lddt_no_stereochecks",
        default=False,
        action="store_true",
        help=("Disable stereochecks for lDDT computation"))

    parser.add_argument(
        "--n-max-naive",
        dest="n_max_naive",
        required=False,
        default=12,
        type=int,
        help=("If number of chains in model and reference are below or equal "
              "that number, the chain mapping will naively enumerate all "
              "possible mappings. A heuristic is used otherwise."))

    return parser.parse_args()

def _Rename(ent):
    """Revert chain names to original names.

    PDBize assigns chain name in order A,B,C,D... which does not allow to infer
    the original chain name. We do a renaming here:
    if there are two chains mapping to chain A the resulting
    chain names will be: A and A2.
    """
    new_chain_names = list()
    chain_indices = list() # the chains where we actually change the name
    suffix_indices = dict() # keep track of whats the current suffix index
                            # for each original chain name

    for ch_idx, ch in enumerate(ent.chains):
        if not ch.HasProp("original_name"):
            # pdbize doesnt set this property for chain names in ['_', '-']
            continue
        original_name = ch.GetStringProp("original_name")
        if original_name in new_chain_names:
            new_name = original_name + str(suffix_indices[original_name])
            new_chain_names.append(new_name)
            suffix_indices[original_name] = suffix_indices[original_name] + 1
        else:
            new_chain_names.append(original_name)
            suffix_indices[original_name] = 2
        chain_indices.append(ch_idx)
    editor = ent.EditXCS()
    # rename to nonsense to avoid clashing chain names
    for ch_idx in chain_indices:
        editor.RenameChain(ent.chains[ch_idx], ent.chains[ch_idx].name+"_yolo")
    # and do final renaming
    for new_name, ch_idx in zip(new_chain_names, chain_indices):
        editor.RenameChain(ent.chains[ch_idx], new_name)

def _LoadStructure(structure_path, sformat=None, fault_tolerant=False,
                   bu_idx=None):
    """Read OST entity either from mmCIF or PDB.

    The returned structure has structure_path attached as structure name
    """

    if not os.path.exists(structure_path):
        raise Exception(f"file not found: {structure_path}")

    if sformat is None:
        # Determine file format from suffix.
        ext = structure_path.split(".")
        if ext[-1] == "gz":
            ext = ext[:-1]
        if len(ext) <= 1:
            raise Exception(f"Could not determine format of file "
                            f"{structure_path}.")
        sformat = ext[-1].lower()

    # increase loglevel, as we would pollute the info log with weird stuff
    ost.PushVerbosityLevel(ost.LogLevel.Error)
    # Load the structure
    if sformat in ["mmcif", "cif"]:
        if bu_idx is not None:
            cif_entity, cif_seqres, cif_info = \
            io.LoadMMCIF(structure_path, info=True, seqres=True,
                         fault_tolerant=fault_tolerant)
            if bu_idx >= len(cif_info.biounits):
                raise RuntimeError(f"Invalid biounit index - requested {bu_idx} "
                                   f"must be < {len(cif_info.biounits)}.")
            biounit = cif_info.biounits[bu_idx]
            entity = biounit.PDBize(cif_entity, min_polymer_size=0)
            if not entity.IsValid():
                raise IOError(
                    "Provided file does not contain valid entity.")
            _Rename(entity)
        else:
            entity = io.LoadMMCIF(structure_path,
                                  fault_tolerant = fault_tolerant)
        if len(entity.residues) == 0:
            raise Exception(f"No residues found in file: {structure_path}")
    elif sformat == "pdb":
        entity = io.LoadPDB(structure_path, fault_tolerant = fault_tolerant)
        if len(entity.residues) == 0:
            raise Exception(f"No residues found in file: {structure_path}")
    else:
        raise Exception(f"Unknown/ unsupported file extension found for "
                        f"file {structure_path}.")
    # restore old loglevel and return
    ost.PopVerbosityLevel()
    entity.SetName(structure_path)
    return entity

def _AlnToFastaStr(aln):
    """ Returns alignment as fasta formatted string
    """
    s1 = aln.GetSequence(0)
    s2 = aln.GetSequence(1)
    return f">reference:{s1.name}\n{str(s1)}\nmodel:{s2.name}\n{str(s2)}"

def _GetInconsistentResidues(alns):
    lst = list()
    for aln in alns:
        for col in aln:
            r1 = col.GetResidue(0)
            r2 = col.GetResidue(1)
            if r1.IsValid() and r2.IsValid() and r1.GetName() != r2.GetName():
                ch_1 = r1.GetChain().name
                num_1 = r1.number.num
                ins_code_1 = r1.number.ins_code.strip("\u0000")
                id_1 = f"{ch_1}.{num_1}.{ins_code_1}"
                ch_2 = r2.GetChain().name
                num_2 = r2.number.num
                ins_code_2 = r2.number.ins_code.strip("\u0000")
                id_2 = f"{ch_2}.{num_2}.{ins_code_2}"
                lst.append(f"{id_1}-{id_2}")
    return lst

def _LocalScoresToJSONDict(score_dict):
    """ Convert ResNums to str for JSON serialization
    """
    json_dict = dict()
    for ch, ch_scores in score_dict.items():
        for num, s in ch_scores.items():
            ins_code = num.ins_code.strip("\u0000")
            json_dict[f"{ch}.{num.num}.{ins_code}"] = s
    return json_dict

def _InterfaceResiduesToJSONList(interface_dict):
    """ Convert ResNums to str for JSON serialization.

    Changes in this function will affect _PatchScoresToJSONList
    """
    json_list = list()
    for ch, ch_nums in interface_dict.items():
        for num in ch_nums:
            ins_code = num.ins_code.strip("\u0000")
            json_list.append(f"{ch}.{num.num}.{ins_code}")
    return json_list

def _PatchScoresToJSONList(interface_dict, score_dict):
    """ Creates List of patch scores that are consistent with interface residue
    lists
    """
    json_list = list()
    for ch, ch_nums in interface_dict.items():
        json_list += score_dict[ch]
    return json_list

def _Process(model, reference, args):
    """Compare model against reference and return results as JSON-ready dict.

    :param model: Model entity as returned by :func:`_LoadStructure`.
    :param reference: Reference entity as returned by :func:`_LoadStructure`.
    :param args: Parsed command line arguments, see :func:`_ParseArgs`.
    :returns: Dictionary with basic comparison data plus one entry per
              requested opt-in score.
    :raises RuntimeError: If aligned residue names differ and
                          -ec/--enforce-consistency is set.
    """

    # optional user-provided chain mapping in form "<ref>:<mdl>" pairs
    mapping = None
    if args.chain_mapping is not None:
        mapping = {x.split(':')[0]: x.split(':')[1] for x in args.chain_mapping}

    scorer = scoring.Scorer(model, reference,
                            resnum_alignments = args.residue_number_alignment,
                            cad_score_exec = args.cad_exec,
                            custom_mapping = mapping,
                            usalign_exec = args.usalign_exec,
                            lddt_no_stereochecks = args.lddt_no_stereochecks,
                            n_max_naive = args.n_max_naive)

    # residue name mismatches are reported; they abort the run only when
    # -ec/--enforce-consistency is set
    ir = _GetInconsistentResidues(scorer.aln)
    if len(ir) > 0 and args.enforce_consistency:
        raise RuntimeError(f"Inconsistent residues observed: {' '.join(ir)}")

    # always-present keys describing the model/reference comparison
    out = dict()
    out["reference_chains"] = [ch.GetName() for ch in scorer.target.chains]
    out["model_chains"] = [ch.GetName() for ch in scorer.model.chains]
    out["chem_groups"] = scorer.chain_mapper.chem_groups
    out["chem_mapping"] = scorer.mapping.chem_mapping
    out["chain_mapping"] = scorer.mapping.GetFlatMapping()
    out["aln"] = [_AlnToFastaStr(aln) for aln in scorer.aln]
    out["inconsistent_residues"] = ir

    # opt-in scores follow; each flag adds its keys to the output dict

    if args.lddt:
        out["lddt"] = scorer.lddt

    if args.local_lddt:
        out["local_lddt"] = _LocalScoresToJSONDict(scorer.local_lddt)

    if args.lddt or args.local_lddt:
        # stereochemical irregularities observed during lDDT stereochecks
        out["model_clashes"] = [x.ToJSON() for x in scorer.model_clashes]
        out["model_bad_bonds"] = [x.ToJSON() for x in scorer.model_bad_bonds]
        out["model_bad_angles"] = [x.ToJSON() for x in scorer.model_bad_angles]
        out["reference_clashes"] = [x.ToJSON() for x in scorer.target_clashes]
        out["reference_bad_bonds"] = [x.ToJSON() for x in scorer.target_bad_bonds]
        out["reference_bad_angles"] = [x.ToJSON() for x in scorer.target_bad_angles]

    if args.bb_lddt:
        out["bb_lddt"] = scorer.bb_lddt

    if args.bb_local_lddt:
        out["bb_local_lddt"] = _LocalScoresToJSONDict(scorer.bb_local_lddt)

    if args.cad_score:
        out["cad_score"] = scorer.cad_score

    if args.local_cad_score:
        out["local_cad_score"] = _LocalScoresToJSONDict(scorer.local_cad_score)

    if args.qs_score:
        out["qs_global"] = scorer.qs_global
        out["qs_best"] = scorer.qs_best

    if args.rigid_scores:
        out["oligo_gdtts"] = scorer.gdtts
        out["oligo_gdtha"] = scorer.gdtha
        out["rmsd"] = scorer.rmsd
        # reshape flat 16-element transform into a 4x4 row-major matrix
        data = scorer.transform.data
        out["transform"] = [data[i:i + 4] for i in range(0, len(data), 4)]

    if args.interface_scores:
        out["interfaces"] = scorer.interfaces
        out["nnat"] = scorer.native_contacts
        out["nmdl"] = scorer.model_contacts
        out["fnat"] = scorer.fnat
        out["fnonnat"] = scorer.fnonnat
        out["irmsd"] = scorer.irmsd
        out["lrmsd"] = scorer.lrmsd
        out["dockq_scores"] = scorer.dockq_scores
        out["interface_qs_global"] = scorer.interface_qs_global
        out["interface_qs_best"] = scorer.interface_qs_best
        out["dockq_ave"] = scorer.dockq_ave
        out["dockq_wave"] = scorer.dockq_wave
        out["dockq_ave_full"] = scorer.dockq_ave_full
        out["dockq_wave_full"] = scorer.dockq_wave_full

    if args.patch_scores:
        out["model_interface_residues"] = \
        _InterfaceResiduesToJSONList(scorer.model_interface_residues)
        out["reference_interface_residues"] = \
        _InterfaceResiduesToJSONList(scorer.target_interface_residues)
        out["patch_qs"] = _PatchScoresToJSONList(scorer.model_interface_residues,
                                                 scorer.patch_qs)
        out["patch_dockq"] = _PatchScoresToJSONList(scorer.model_interface_residues,
                                                    scorer.patch_dockq)

    if args.tm_score:
        out["tm_score"] = scorer.tm_score
        out["usalign_mapping"] = scorer.usalign_mapping

    if args.dump_structures:
        # PDB format only supports single character chain names; renaming due
        # to user specified biounits may violate that - give a helpful error
        try:
            io.SavePDB(scorer.model, model.GetName() + args.dump_suffix)
        except Exception as e:
            if "single-letter" in str(e) and args.model_biounit is not None:
                raise RuntimeError("Failed to dump processed model. PDB "
                                   "format only supports single character "
                                   "chain names. This is likely the result of "
                                   "chain renaming when constructing a user "
                                   "specified biounit. Dumping structures "
                                   "fails in this case.")
            else:
                raise
        try:
            io.SavePDB(scorer.target, reference.GetName() + args.dump_suffix)
        except Exception as e:
            if "single-letter" in str(e) and args.reference_biounit is not None:
                raise RuntimeError("Failed to dump processed reference. PDB "
                                   "format only supports single character "
                                   "chain names. This is likely the result of "
                                   "chain renaming when constructing a user "
                                   "specified biounit. Dumping structures "
                                   "fails in this case.")
            else:
                raise
    return out

def _WriteJSON(data, out_path):
    """Serialize data to out_path as a JSON file."""
    with open(out_path, 'w') as fh:
        json.dump(data, fh, indent=4, sort_keys=False)

def _Main():
    """Entry point of the compare-structures action.

    Parses arguments, loads both structures, scores them and writes the
    result as JSON to args.output. On failure, a JSON file containing only
    "status" (FAILURE) and "traceback" is written before re-raising.
    """

    args = _ParseArgs()
    try:
        # CAD score requires matching residue numbers => -rna must be enabled
        compute_cad = args.cad_score or args.local_cad_score
        if compute_cad and not args.residue_number_alignment:
            raise RuntimeError("Only support CAD score when residue numbers in "
                               "model and reference match. Use -rna flag if "
                               "this is the case.")
        reference = _LoadStructure(args.reference,
                                   sformat=args.reference_format,
                                   bu_idx=args.reference_biounit,
                                   fault_tolerant = args.fault_tolerant)
        model = _LoadStructure(args.model,
                               sformat=args.model_format,
                               bu_idx=args.model_biounit,
                               fault_tolerant = args.fault_tolerant)
        out = _Process(model, reference, args)

        # append input arguments to allow reproduction of the results
        out["model"] = args.model
        out["reference"] = args.reference
        out["fault_tolerant"] = args.fault_tolerant
        out["model_biounit"] = args.model_biounit
        out["reference_biounit"] = args.reference_biounit
        out["residue_number_alignment"] = args.residue_number_alignment
        out["enforce_consistency"] = args.enforce_consistency
        out["cad_exec"] = args.cad_exec
        out["usalign_exec"] = args.usalign_exec
        out["lddt_no_stereochecks"] = args.lddt_no_stereochecks
        out["status"] = "SUCCESS"
        _WriteJSON(out, args.output)
    except Exception:
        # a bare "except:" here would also trap SystemExit/KeyboardInterrupt
        # and misreport user interrupts as scoring failures
        out = dict()
        out["status"] = "FAILURE"
        out["traceback"] = traceback.format_exc()
        _WriteJSON(out, args.output)
        raise

# script entry point when executed directly (e.g. via "ost compare-structures")
if __name__ == '__main__':
    _Main()
