################################################################################
# THIS IS JABAWS EXECUTABLE CONFIGURATION FILE #
################################################################################

## Properties supported by all executables:

## Path to the native executable on windows,
## the path must be either absolute, or relative to JABAWS web application
# local.<execname>.bin.windows=binaries/windows/clustalw2.exe

## Path to the native executable not on windows (e.g. Linux, Mac)
## the path must be either absolute, or relative to JABAWS web application
# local.<execname>.bin=binaries/src/clustalw/src/clustalw2

## Path to the native executable on the cluster (must be accessible from all
## cluster nodes which will run JABAWS jobs). The path must be absolute.
# cluster.<execname>.bin=/homes/pvtroshin/workspace/jaba2/binaries/src/clustalw/src/clustalw2

## At least one of the paths to the native executable should be defined!

## Application supported presets, absolute or relative to
## the JABAWS web application path to the file. Optional.
# <execname>.presets.file=conf/settings/ClustalPresets.xml

## Application supported parameters, absolute or relative to
## the JABAWS web application path to the file. Optional.
# <execname>.parameters.file=conf/settings/ClustalParameters.xml

## Application limits, absolute or relative to
## the JABAWS web application path to the file. Optional.
## Use this if you want to limit the size of the job to be accepted by a
## Webservice. The limits files we use in Dundee are provided with JABAWS.
# <execname>.limits.file=conf/settings/ClustalLimits.xml

## Flags passed to the cluster batch manager for the application. Optional.
## This example sets a maximum execution time to 24 hours and maximum amount of
## memory per task to 6Gb for SGE and OGE cluster batch managers.
## Please note that all the examples of this parameter below are correct for
## Sun Grid Engine or Open Grid Engine (untested) only! If you use a different
## batch manager you would need to specify different flags.
# <execname>.cluster.settings=-l h_cpu=24:00:00 -l h_vmem=6000M -l ram=6000M

## Environmental variables required by native executables. Optional.
## Format: VARIABLE_NAME1#VARIABLE_VALUE1;VARIABLE_NAME2#VARIABLE_VALUE2;
#<execname>.bin.env=MAFFT_BINARIES#binaries/src/mafft/binaries;FASTA_4_MAFFT#binaries/src/fasta34/fasta34;

# Where <execname> is one of [clustalw, mafft, muscle, probcons, tcoffee,
# iupred, jronn, globplot, disembl, aacon]

### Parameters specific to the JAVA Jar executables. ###

## Parameter supported by the executable Jar files, such as jronn and aacon,
## point to the location of the jar file. Also, local.<execname>.bin,
## local.<execname>.bin.windows properties are optional for these, if not
## provided they will be replaced with the java executable path from the JAVA_HOME
## environmental variable.
#<execname>.jar.file=binaries/windows/bj3.0.4p-jronn.jar

###### CLUSTAL CONFIGURATION ######
local.clustalw.bin.windows=binaries/windows/clustalw2.exe
local.clustalw.bin=binaries/src/clustalw/src/clustalw2
#cluster.clustalw.bin=/homes/pvtroshin/workspace/jaba2/binaries/src/clustalw/src/clustalw2
## This parameter specifies the directory where the matrices files are stored
clustalw.-matrix.path=binaries/matrices
clustalw.presets.file=conf/settings/ClustalPresets.xml
clustalw.parameters.file=conf/settings/ClustalParameters.xml
#clustalw.limits.file=conf/settings/ClustalLimits.xml
#clustalw.cluster.settings=-l h_cpu=24:00:00 -l ram=6000M

###### CLUSTAL OMEGA CONFIGURATION ######
local.clustalo.bin.windows=binaries/windows/clustalo/clustalo.exe
local.clustalo.bin=binaries/src/clustalo/src/clustalo
#cluster.clustalo.bin=/homes/pvtroshin/workspace/jaba2/binaries/src/clustalo/src/clustalo
#clustalo.presets.file=conf/settings/ClustaloPresets.xml
clustalo.parameters.file=conf/settings/ClustaloParameters.xml
#clustalo.limits.file=conf/settings/ClustaloLimits.xml
## ClustalO can be executed on multiple CPUs if run on the cluster.
## This parameter specifies the number of CPUs to use
#clustalo.cluster.cpunum=4
## This reserves a slot with CPUNUM on the cluster for the task
#clustalo.cluster.settings=-q 64bit-pri.q -pe smp 4 -l ram=1700M -l h_cpu=24:00:00

###### MUSCLE CONFIGURATION ######
local.muscle.bin.windows=binaries/windows/muscle.exe
local.muscle.bin=binaries/src/muscle/muscle
#cluster.muscle.bin=/homes/pvtroshin/workspace/jaba2/binaries/src/muscle/muscle
## This parameter specifies the directory where the matrices files are stored
muscle.-matrix.path=binaries/matrices
muscle.presets.file=conf/settings/MusclePresets.xml
muscle.parameters.file=conf/settings/MuscleParameters.xml
#muscle.limits.file=conf/settings/MuscleLimits.xml
#muscle.cluster.settings=-l h_cpu=24:00:00 -l ram=6000M

##### MAFFT CONFIGURATION ######
local.mafft.bin=binaries/src/mafft/scripts/mafft
#cluster.mafft.bin=/homes/pvtroshin/workspace/jaba2/binaries/src/mafft/scripts/mafft
# These paths will be converted to absolute if relative.
mafft.bin.env=MAFFT_BINARIES#binaries/src/mafft/binaries;FASTA_4_MAFFT#binaries/src/fasta34/fasta34;
## This parameter specifies the directory where the matrices files are stored
mafft.--aamatrix.path=binaries/matrices
mafft.presets.file=conf/settings/MafftPresets.xml
mafft.parameters.file=conf/settings/MafftParameters.xml
#mafft.limits.file=conf/settings/MafftLimits.xml
#mafft.cluster.settings=-l h_cpu=24:00:00 -l ram=6000M

##### TCOFFEE CONFIGURATION ######
local.tcoffee.bin=binaries/src/tcoffee/t_coffee_source/t_coffee
#cluster.tcoffee.bin=/homes/pvtroshin/workspace/jaba2/binaries/src/tcoffee/t_coffee_source/t_coffee
# This variable is required by tcoffee
tcoffee.bin.env=HOME_4_TCOFFEE#jobsout;
tcoffee.presets.file=conf/settings/TcoffeePresets.xml
tcoffee.parameters.file=conf/settings/TcoffeeParameters.xml
#tcoffee.limits.file=conf/settings/TcoffeeLimits.xml
## Tcoffee can be executed on multiple CPUs if run on the cluster.
## This parameter specifies the number of CPUs to use
#tcoffee.cluster.cpunum=4
## This reserves a slot with CPUNUM on the cluster for the task
#tcoffee.cluster.settings=-q 64bit-pri.q -pe smp 4 -l ram=1700M -l h_cpu=24:00:00

##### PROBCONS CONFIGURATION ######
local.probcons.bin=binaries/src/probcons/probcons
#cluster.probcons.bin=/homes/pvtroshin/workspace/jaba2/binaries/src/probcons/probcons
probcons.parameters.file=conf/settings/ProbconsParameters.xml
#probcons.limits.file=conf/settings/ProbconsLimits.xml
#probcons.cluster.settings=-l h_cpu=24:00:00 -l ram=6000M

##### JRONN CONFIGURATION ######
## If no local path is specified, Java is loaded from the JAVA_HOME environment
## variable for local execution. However, cluster.jronn.bin must be specified for
## running Jronn on the cluster.
#local.jronn.bin.windows=D:\\Java\\jdk1.6.0_24\\bin\\java.exe
#local.jronn.bin=/sw/java/latest/bin/java
#cluster.jronn.bin=/sw/java/latest/bin/java
jronn.jar.file=binaries/windows/jronn3.1.jar
#jronn.limits.file=conf/settings/JronnLimits.xml
## Jronn can use multiple CPUs
## This parameter specifies the number of CPUs to use
#jronn.cluster.cpunum=4
## This reserves a slot with CPUNUM on the cluster for the task
#jronn.cluster.settings=-q 64bit-pri.q -pe smp 4 -l h_cpu=24:00:00

##### DISEMBL CONFIGURATION ######
local.disembl.bin=binaries/src/disembl/DisEMBL.py
#cluster.disembl.bin=/homes/pvtroshin/workspace/jaba2/binaries/src/disembl/DisEMBL.py
#disembl.limits.file=conf/settings/DisemblLimits.xml
#disembl.cluster.settings=-l h_cpu=24:00:00 -l ram=6000M

##### GLOBPLOT CONFIGURATION ######
local.globplot.bin=binaries/src/globplot/GlobPlot.py
#cluster.globplot.bin=/homes/pvtroshin/workspace/jaba2/binaries/src/globplot/GlobPlot.py
#globplot.bin.env=PYTHONPATH#/homes/pvtroshin/workspace/jaba2/binaries/src/globplot/biopython-1.50
globplot.limits.file=conf/settings/GlobPlotLimits.xml
#globplot.cluster.settings=-l h_cpu=24:00:00 -l ram=6000M

##### IUPRED CONFIGURATION ######
local.iupred.bin.windows=binaries/windows/iupred/iupred.exe
local.iupred.bin=binaries/src/iupred/iupred
## This must point to the directory where the iupred binary is, with other files it
## depends on. This path will be converted to absolute if relative at runtime.
iupred.bin.env=IUPred_PATH#binaries/src/iupred
#cluster.iupred.bin=/homes/pvtroshin/workspace/jaba2/binaries/src/iupred/iupred
iupred.parameters.file=conf/settings/IUPredParameters.xml
#iupred.limits.file=conf/settings/IUPredLimits.xml
#iupred.cluster.settings=-l h_cpu=24:00:00 -l ram=6000M

##### AACON CONFIGURATION ######
# This is just a path to the standard java executable
#local.aacon.bin.windows=D:\\Java\\jdk1.6.0_24\\bin\\java.exe
#local.aacon.bin=/sw/java/latest/bin/java
#cluster.aacon.bin=/sw/java/latest/bin/java
# Path to the AACon library
aacon.jar.file=binaries/windows/aaconservation.jar
aacon.parameters.file=conf/settings/AAConParameters.xml
aacon.presets.file=conf/settings/AAConPresets.xml
#aacon.limits.file=conf/settings/AAConLimits.xml
## AACon can use multiple CPUs
## This parameter specifies the number of CPUs to use
#aacon.cluster.cpunum=4
## This reserves a slot with CPUNUM on the cluster for the task
#aacon.cluster.settings=-q 64bit-pri.q -pe smp 4 -l ram=1700M -l h_cpu=24:00:00