+ else if ( p_array.length > 1 ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME, "file [" + intree_file
+ + "] contains more than one phylogeny in phyloXML format" );
+ }
+ intree = p_array[ 0 ];
+ }
+ catch ( final Exception e ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME, "failed to read input tree from file [" + intree_file
+ + "]: " + error );
+ }
+ if ( ( intree == null ) || intree.isEmpty() ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME, "input tree [" + intree_file + "] is empty" );
+ }
+ if ( !intree.isRooted() ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME, "input tree [" + intree_file + "] is not rooted" );
+ }
+ if ( intree.getNumberOfExternalNodes() < number_of_genomes ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME,
+ "number of external nodes [" + intree.getNumberOfExternalNodes()
+ + "] of input tree [" + intree_file
+ + "] is smaller than the number of genomes the be analyzed ["
+ + number_of_genomes + "]" );
+ }
+ final StringBuilder parent_names = new StringBuilder();
+ final int nodes_lacking_name = getNumberOfNodesLackingName( intree, parent_names );
+ if ( nodes_lacking_name > 0 ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME, "input tree [" + intree_file + "] has "
+ + nodes_lacking_name + " node(s) lacking a name [parent names:" + parent_names + "]" );
+ }
+ preparePhylogenyForParsimonyAnalyses( intree, input_file_properties );
+ if ( !intree.isCompletelyBinary() ) {
+ ForesterUtil.printWarningMessage( surfacing.PRG_NAME, "input tree [" + intree_file
+ + "] is not completely binary" );
+ }
+ intrees[ i++ ] = intree;
+ }
+ return intrees;
+ }
+
+ public static Phylogeny obtainFirstIntree( final File intree_file ) {
+ Phylogeny intree = null;
+ final String error = ForesterUtil.isReadableFile( intree_file );
+ if ( !ForesterUtil.isEmpty( error ) ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME, "cannot read input tree file [" + intree_file + "]: " + error );
+ }
+ try {
+ final Phylogeny[] phys = ParserBasedPhylogenyFactory.getInstance()
+ .create( intree_file, ParserUtils.createParserDependingOnFileType( intree_file, true ) );
+ if ( phys.length < 1 ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME, "file [" + intree_file
+ + "] does not contain any phylogeny in phyloXML format" );
+ }
+ else if ( phys.length > 1 ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME, "file [" + intree_file
+ + "] contains more than one phylogeny in phyloXML format" );
+ }
+ intree = phys[ 0 ];
+ }
+ catch ( final Exception e ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME, "failed to read input tree from file [" + intree_file + "]: "
+ + error );
+ }
+ if ( ( intree == null ) || intree.isEmpty() ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME, "input tree [" + intree_file + "] is empty" );
+ }
+ if ( !intree.isRooted() ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME, "input tree [" + intree_file + "] is not rooted" );
+ }
+ return intree;
+ }
+
+ public static String obtainHexColorStringDependingOnTaxonomyGroup( final String tax_code, final Phylogeny phy )
+ throws IllegalArgumentException {
+ if ( !_TAXCODE_HEXCOLORSTRING_MAP.containsKey( tax_code ) ) {
+ if ( ( phy != null ) && !phy.isEmpty() ) {
+ // final List<PhylogenyNode> nodes = phy.getNodesViaTaxonomyCode( tax_code );
+ // Color c = null;
+ // if ( ( nodes == null ) || nodes.isEmpty() ) {
+ // throw new IllegalArgumentException( "code " + tax_code + " is not found" );
+ // }
+ // if ( nodes.size() != 1 ) {
+ // throw new IllegalArgumentException( "code " + tax_code + " is not unique" );
+ // }
+ // PhylogenyNode n = nodes.get( 0 );
+ // while ( n != null ) {
+ // if ( n.getNodeData().isHasTaxonomy()
+ // && !ForesterUtil.isEmpty( n.getNodeData().getTaxonomy().getScientificName() ) ) {
+ // c = ForesterUtil.obtainColorDependingOnTaxonomyGroup( n.getNodeData().getTaxonomy()
+ // .getScientificName(), tax_code );
+ // }
+ // if ( ( c == null ) && !ForesterUtil.isEmpty( n.getName() ) ) {
+ // c = ForesterUtil.obtainColorDependingOnTaxonomyGroup( n.getName(), tax_code );
+ // }
+ // if ( c != null ) {
+ // break;
+ // }
+ // n = n.getParent();
+ // }
+ final String group = obtainTaxonomyGroup( tax_code, phy );
+ final Color c = ForesterUtil.obtainColorDependingOnTaxonomyGroup( group );
+ if ( c == null ) {
+ throw new IllegalArgumentException( "no color found for taxonomy group \"" + group
+ + "\" for code \"" + tax_code + "\"" );
+ }
+ final String hex = String.format( "#%02x%02x%02x", c.getRed(), c.getGreen(), c.getBlue() );
+ _TAXCODE_HEXCOLORSTRING_MAP.put( tax_code, hex );
+ }
+ else {
+ throw new IllegalArgumentException( "unable to obtain color for code " + tax_code
+ + " (tree is null or empty and code is not in map)" );
+ }
+ }
+ return _TAXCODE_HEXCOLORSTRING_MAP.get( tax_code );
+ }
+
    /**
     * Resolves the taxonomy group name for a taxonomy code, caching results in
     * {@code _TAXCODE_TAXGROUP_MAP}. On a cache miss the single node carrying the
     * code is located in the species tree and the tree is walked from that node
     * towards the root until a name normalizes to a non-empty group.
     *
     * @param tax_code the taxonomy code to resolve
     * @param species_tree the species tree to search; may only be null/empty if
     *            the code is already cached
     * @return the group name for the code (never null)
     * @throws IllegalArgumentException if the code is absent or not unique in the
     *             tree, if no group can be found on the root path, or if the tree
     *             is null/empty and the code is not cached
     */
    public static String obtainTaxonomyGroup( final String tax_code, final Phylogeny species_tree )
            throws IllegalArgumentException {
        if ( !_TAXCODE_TAXGROUP_MAP.containsKey( tax_code ) ) {
            if ( ( species_tree != null ) && !species_tree.isEmpty() ) {
                final List<PhylogenyNode> nodes = species_tree.getNodesViaTaxonomyCode( tax_code );
                if ( ( nodes == null ) || nodes.isEmpty() ) {
                    throw new IllegalArgumentException( "code " + tax_code + " is not found" );
                }
                if ( nodes.size() != 1 ) {
                    throw new IllegalArgumentException( "code " + tax_code + " is not unique" );
                }
                PhylogenyNode n = nodes.get( 0 );
                String group = null;
                // Walk towards the root; at each node try the scientific name
                // first, then fall back to the node name. Stop at the first node
                // that normalizes to a non-empty group.
                while ( n != null ) {
                    if ( n.getNodeData().isHasTaxonomy()
                            && !ForesterUtil.isEmpty( n.getNodeData().getTaxonomy().getScientificName() ) ) {
                        group = ForesterUtil.obtainNormalizedTaxonomyGroup( n.getNodeData().getTaxonomy()
                                .getScientificName() );
                    }
                    if ( ForesterUtil.isEmpty( group ) && !ForesterUtil.isEmpty( n.getName() ) ) {
                        group = ForesterUtil.obtainNormalizedTaxonomyGroup( n.getName() );
                    }
                    if ( !ForesterUtil.isEmpty( group ) ) {
                        break;
                    }
                    n = n.getParent();
                }
                if ( ForesterUtil.isEmpty( group ) ) {
                    throw new IllegalArgumentException( "no group found for taxonomy code \"" + tax_code + "\"" );
                }
                _TAXCODE_TAXGROUP_MAP.put( tax_code, group );
            }
            else {
                throw new IllegalArgumentException( "unable to obtain group for code " + tax_code
                        + " (tree is null or empty and code is not in map)" );
            }
        }
        return _TAXCODE_TAXGROUP_MAP.get( tax_code );
    }
+
+ public static void performDomainArchitectureAnalysis( final SortedMap<String, Set<String>> domain_architecutures,
+ final SortedMap<String, Integer> domain_architecuture_counts,
+ final int min_count,
+ final File da_counts_outfile,
+ final File unique_da_outfile ) {
+ checkForOutputFileWriteability( da_counts_outfile );
+ checkForOutputFileWriteability( unique_da_outfile );
+ try {
+ final BufferedWriter da_counts_out = new BufferedWriter( new FileWriter( da_counts_outfile ) );
+ final BufferedWriter unique_da_out = new BufferedWriter( new FileWriter( unique_da_outfile ) );
+ final Iterator<Entry<String, Integer>> it = domain_architecuture_counts.entrySet().iterator();
+ while ( it.hasNext() ) {
+ final Map.Entry<String, Integer> e = it.next();
+ final String da = e.getKey();
+ final int count = e.getValue();
+ if ( count >= min_count ) {
+ da_counts_out.write( da );
+ da_counts_out.write( "\t" );
+ da_counts_out.write( String.valueOf( count ) );
+ da_counts_out.write( ForesterUtil.LINE_SEPARATOR );
+ }
+ if ( count == 1 ) {
+ final Iterator<Entry<String, Set<String>>> it2 = domain_architecutures.entrySet().iterator();
+ while ( it2.hasNext() ) {
+ final Map.Entry<String, Set<String>> e2 = it2.next();
+ final String genome = e2.getKey();
+ final Set<String> das = e2.getValue();
+ if ( das.contains( da ) ) {
+ unique_da_out.write( genome );
+ unique_da_out.write( "\t" );
+ unique_da_out.write( da );
+ unique_da_out.write( ForesterUtil.LINE_SEPARATOR );
+ }
+ }
+ }
+ }
+ unique_da_out.close();
+ da_counts_out.close();
+ }
+ catch ( final IOException e ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME, e.getMessage() );
+ }
+ ForesterUtil.programMessage( surfacing.PRG_NAME, "Wrote distance matrices to \"" + da_counts_outfile + "\"" );
+ ForesterUtil.programMessage( surfacing.PRG_NAME, "Wrote distance matrices to \"" + unique_da_outfile + "\"" );
+ //
+ }
+
+ public static void preparePhylogeny( final Phylogeny p,
+ final DomainParsimonyCalculator domain_parsimony,
+ final String date_time,
+ final String method,
+ final String name,
+ final String parameters_str ) {
+ domain_parsimony.decoratePhylogenyWithDomains( p );
+ final StringBuilder desc = new StringBuilder();
+ desc.append( "[Method: " + method + "] [Date: " + date_time + "] " );
+ desc.append( "[Cost: " + domain_parsimony.getCost() + "] " );
+ desc.append( "[Gains: " + domain_parsimony.getTotalGains() + "] " );
+ desc.append( "[Losses: " + domain_parsimony.getTotalLosses() + "] " );
+ desc.append( "[Unchanged: " + domain_parsimony.getTotalUnchanged() + "] " );
+ desc.append( "[Parameters: " + parameters_str + "]" );
+ p.setName( name );
+ p.setDescription( desc.toString() );
+ p.setConfidence( new Confidence( domain_parsimony.getCost(), "parsimony" ) );
+ p.setRerootable( false );
+ p.setRooted( true );
+ }
+
    /**
     * Prepares the input tree for parsimony analyses, mutating it in place:
     * (1) verifies each genome's node name is unique in the tree, (2) fills in
     * missing node names from taxonomy data, (3) prunes external nodes not in
     * the genome list, and (4) verifies every genome node is still present.
     *
     * @param intree the input tree (mutated in place)
     * @param input_file_properties rows of {path, species-code, ...} per genome;
     *            index 1 is used as the node name
     */
    public static void preparePhylogenyForParsimonyAnalyses( final Phylogeny intree,
                                                             final String[][] input_file_properties ) {
        // Phase 1: collect genome node names, failing fast on duplicates.
        final String[] genomes = new String[ input_file_properties.length ];
        for( int i = 0; i < input_file_properties.length; ++i ) {
            if ( intree.getNodes( input_file_properties[ i ][ 1 ] ).size() > 1 ) {
                ForesterUtil.fatalError( surfacing.PRG_NAME, "node named [" + input_file_properties[ i ][ 1 ]
                        + "] is not unique in input tree " + intree.getName() );
            }
            genomes[ i ] = input_file_properties[ i ][ 1 ];
        }
        // Phase 2: name unnamed nodes from taxonomy data, preferring the
        // taxonomy code, then the scientific name, then the common name.
        final PhylogenyNodeIterator it = intree.iteratorPostorder();
        while ( it.hasNext() ) {
            final PhylogenyNode n = it.next();
            if ( ForesterUtil.isEmpty( n.getName() ) ) {
                if ( n.getNodeData().isHasTaxonomy()
                        && !ForesterUtil.isEmpty( n.getNodeData().getTaxonomy().getTaxonomyCode() ) ) {
                    n.setName( n.getNodeData().getTaxonomy().getTaxonomyCode() );
                }
                else if ( n.getNodeData().isHasTaxonomy()
                        && !ForesterUtil.isEmpty( n.getNodeData().getTaxonomy().getScientificName() ) ) {
                    n.setName( n.getNodeData().getTaxonomy().getScientificName() );
                }
                else if ( n.getNodeData().isHasTaxonomy()
                        && !ForesterUtil.isEmpty( n.getNodeData().getTaxonomy().getCommonName() ) ) {
                    n.setName( n.getNodeData().getTaxonomy().getCommonName() );
                }
                else {
                    ForesterUtil
                            .fatalError( surfacing.PRG_NAME,
                                         "node with no name, scientific name, common name, or taxonomy code present" );
                }
            }
        }
        // Phase 3: prune external nodes that are not among the analyzed genomes,
        // reporting the ones that were dropped.
        final List<String> igns = PhylogenyMethods.deleteExternalNodesPositiveSelection( genomes, intree );
        if ( igns.size() > 0 ) {
            System.out.println( "Not using the following " + igns.size() + " nodes:" );
            for( int i = 0; i < igns.size(); ++i ) {
                System.out.println( " " + i + ": " + igns.get( i ) );
            }
            System.out.println( "--" );
        }
        // Phase 4: sanity check — every genome node must still exist (and be
        // unique) after pruning; getNode() throws otherwise.
        for( final String[] input_file_propertie : input_file_properties ) {
            try {
                intree.getNode( input_file_propertie[ 1 ] );
            }
            catch ( final IllegalArgumentException e ) {
                ForesterUtil.fatalError( surfacing.PRG_NAME, "node named [" + input_file_propertie[ 1 ]
                        + "] not present/not unique in input tree" );
            }
        }
    }
+
+ public static void printOutPercentageOfMultidomainProteins( final SortedMap<Integer, Integer> all_genomes_domains_per_potein_histo,
+ final Writer log_writer ) {
+ int sum = 0;
+ for( final Entry<Integer, Integer> entry : all_genomes_domains_per_potein_histo.entrySet() ) {
+ sum += entry.getValue();
+ }
+ final double percentage = ( 100.0 * ( sum - all_genomes_domains_per_potein_histo.get( 1 ) ) ) / sum;
+ ForesterUtil.programMessage( surfacing.PRG_NAME, "Percentage of multidomain proteins: " + percentage + "%" );
+ log( "Percentage of multidomain proteins: : " + percentage + "%", log_writer );
+ }
+
+ public static void processFilter( final File filter_file, final SortedSet<String> filter ) {
+ SortedSet<String> filter_str = null;
+ try {
+ filter_str = ForesterUtil.file2set( filter_file );
+ }
+ catch ( final IOException e ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME, e.getMessage() );
+ }
+ if ( filter_str != null ) {
+ for( final String string : filter_str ) {
+ filter.add( string );
+ }
+ }
+ if ( surfacing.VERBOSE ) {
+ System.out.println( "Filter:" );
+ for( final String domainId : filter ) {
+ System.out.println( domainId );
+ }
+ }
+ }
+
+ public static String[][] processInputGenomesFile( final File input_genomes ) {
+ String[][] input_file_properties = null;
+ try {
+ input_file_properties = ForesterUtil.file22dArray( input_genomes );
+ }
+ catch ( final IOException e ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME,
+ "genomes files is to be in the following format \"<hmmpfam output file> <species>\": "
+ + e.getLocalizedMessage() );
+ }
+ final Set<String> specs = new HashSet<String>();
+ final Set<String> paths = new HashSet<String>();
+ for( int i = 0; i < input_file_properties.length; ++i ) {
+ if ( !PhyloXmlUtil.TAXOMONY_CODE_PATTERN.matcher( input_file_properties[ i ][ 1 ] ).matches() ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME, "illegal format for species code: "
+ + input_file_properties[ i ][ 1 ] );
+ }
+ if ( specs.contains( input_file_properties[ i ][ 1 ] ) ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME, "species code " + input_file_properties[ i ][ 1 ]
+ + " is not unique" );
+ }
+ specs.add( input_file_properties[ i ][ 1 ] );
+ if ( paths.contains( input_file_properties[ i ][ 0 ] ) ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME, "path " + input_file_properties[ i ][ 0 ]
+ + " is not unique" );
+ }
+ paths.add( input_file_properties[ i ][ 0 ] );
+ final String error = ForesterUtil.isReadableFile( new File( input_file_properties[ i ][ 0 ] ) );
+ if ( !ForesterUtil.isEmpty( error ) ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME, error );
+ }
+ }
+ return input_file_properties;
+ }
+
+ public static void processPlusMinusAnalysisOption( final CommandLineArguments cla,
+ final List<String> high_copy_base,
+ final List<String> high_copy_target,
+ final List<String> low_copy,
+ final List<Object> numbers ) {
+ if ( cla.isOptionSet( surfacing.PLUS_MINUS_ANALYSIS_OPTION ) ) {
+ if ( !cla.isOptionValueSet( surfacing.PLUS_MINUS_ANALYSIS_OPTION ) ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME, "no value for 'plus-minus' file: -"
+ + surfacing.PLUS_MINUS_ANALYSIS_OPTION + "=<file>" );
+ }
+ final File plus_minus_file = new File( cla.getOptionValue( surfacing.PLUS_MINUS_ANALYSIS_OPTION ) );
+ final String msg = ForesterUtil.isReadableFile( plus_minus_file );
+ if ( !ForesterUtil.isEmpty( msg ) ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME, "can not read from \"" + plus_minus_file + "\": " + msg );
+ }
+ processPlusMinusFile( plus_minus_file, high_copy_base, high_copy_target, low_copy, numbers );
+ }
+ }
+
+ // First numbers is minimal difference, second is factor.
+ public static void processPlusMinusFile( final File plus_minus_file,
+ final List<String> high_copy_base,
+ final List<String> high_copy_target,
+ final List<String> low_copy,
+ final List<Object> numbers ) {
+ Set<String> species_set = null;
+ int min_diff = surfacing.PLUS_MINUS_ANALYSIS_MIN_DIFF_DEFAULT;
+ double factor = surfacing.PLUS_MINUS_ANALYSIS_FACTOR_DEFAULT;
+ try {
+ species_set = ForesterUtil.file2set( plus_minus_file );
+ }
+ catch ( final IOException e ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME, e.getMessage() );
+ }
+ if ( species_set != null ) {
+ for( final String species : species_set ) {
+ final String species_trimmed = species.substring( 1 );
+ if ( species.startsWith( "+" ) ) {
+ if ( low_copy.contains( species_trimmed ) ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME,
+ "species/genome names can not appear with both '+' and '-' suffix, as appears the case for: \""
+ + species_trimmed + "\"" );
+ }
+ high_copy_base.add( species_trimmed );
+ }
+ else if ( species.startsWith( "*" ) ) {
+ if ( low_copy.contains( species_trimmed ) ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME,
+ "species/genome names can not appear with both '*' and '-' suffix, as appears the case for: \""
+ + species_trimmed + "\"" );
+ }
+ high_copy_target.add( species_trimmed );
+ }
+ else if ( species.startsWith( "-" ) ) {
+ if ( high_copy_base.contains( species_trimmed ) || high_copy_target.contains( species_trimmed ) ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME,
+ "species/genome names can not appear with both '+' or '*' and '-' suffix, as appears the case for: \""
+ + species_trimmed + "\"" );
+ }
+ low_copy.add( species_trimmed );
+ }
+ else if ( species.startsWith( "$D" ) ) {
+ try {
+ min_diff = Integer.parseInt( species.substring( 3 ) );
+ }
+ catch ( final NumberFormatException e ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME,
+ "could not parse integer value for minimal difference from: \""
+ + species.substring( 3 ) + "\"" );
+ }
+ }
+ else if ( species.startsWith( "$F" ) ) {
+ try {
+ factor = Double.parseDouble( species.substring( 3 ) );
+ }
+ catch ( final NumberFormatException e ) {
+ ForesterUtil.fatalError( surfacing.PRG_NAME, "could not parse double value for factor from: \""
+ + species.substring( 3 ) + "\"" );
+ }
+ }
+ else if ( species.startsWith( "#" ) ) {
+ // Comment, ignore.
+ }
+ else {
+ ForesterUtil
+ .fatalError( surfacing.PRG_NAME,
+ "species/genome names in 'plus minus' file must begin with '*' (high copy target genome), '+' (high copy base genomes), '-' (low copy genomes), '$D=<integer>' minimal Difference (default is 1), '$F=<double>' factor (default is 1.0), double), or '#' (ignore) suffix, encountered: \""
+ + species + "\"" );
+ }
+ numbers.add( new Integer( min_diff + "" ) );
+ numbers.add( new Double( factor + "" ) );
+ }
+ }
+ else {
+ ForesterUtil.fatalError( surfacing.PRG_NAME, "'plus minus' file [" + plus_minus_file + "] appears empty" );
+ }
+ }
+
+ /*
+ * species | protein id | n-terminal domain | c-terminal domain | n-terminal domain per domain E-value | c-terminal domain per domain E-value
+ *
+ *
+ */
+ static public StringBuffer proteinToDomainCombinations( final Protein protein,
+ final String protein_id,
+ final String separator ) {
+ final StringBuffer sb = new StringBuffer();
+ if ( protein.getSpecies() == null ) {
+ throw new IllegalArgumentException( "species must not be null" );
+ }
+ if ( ForesterUtil.isEmpty( protein.getSpecies().getSpeciesId() ) ) {
+ throw new IllegalArgumentException( "species id must not be empty" );
+ }
+ final List<Domain> domains = protein.getProteinDomains();
+ if ( domains.size() > 1 ) {
+ final Map<String, Integer> counts = new HashMap<String, Integer>();
+ for( final Domain domain : domains ) {
+ final String id = domain.getDomainId();
+ if ( counts.containsKey( id ) ) {
+ counts.put( id, counts.get( id ) + 1 );
+ }
+ else {
+ counts.put( id, 1 );