#! /bin/sh
# This is a shell archive, meaning:
# 1. Remove everything above the #! /bin/sh line.
# 2. Save the resulting text in a file.
# 3. Execute the file with /bin/sh (not csh) to create the files:
# COPYING
# Makefile
# README
# ci.f
# cmprss.f
# commas.f
# csmspl.f
# cspl.f
# gauss.f
# grade.f
# hdeh.f
# init.f
# iter.f
# lms.f
# loop.f
# loopx.f
# match.f
# plot.f
# range.f
# rdfree.f
# rfcspl.f
# rsave.f
# strch.f
# trace.f
# upd.f
# ustrch.f
# This archive created: Mon Mar 11 10:31:36 1996
export PATH; PATH=/bin:$PATH
if test -f 'COPYING'
then
echo shar: will not over-write existing file "'COPYING'"
else
cat << \SHAR_EOF > 'COPYING'
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.
675 Mass Ave, Cambridge, MA 02139, USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Library General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
SHAR_EOF
fi # end of overwriting check
if test -f 'Makefile'
then
echo shar: will not over-write existing file "'Makefile'"
else
cat << \SHAR_EOF > 'Makefile'
FFLAGS = -fast
SOURCES:sh = ls -1 *.f
#HEADERS:sh = ls -1 *.inc
OBJECTS:sh = ls -1 *.f | sed 's/\.f/.o/'
lms: lib.a ${SOURCES} ${HEADERS}
f77 ${FFLAGS} -o lms -umain lib.a -lm92
lib.a: lib.a(${OBJECTS})
lib.a(%.o): %.f ${HEADERS}
f77 ${FFLAGS} -c -o $% $<
${AR} ${ARFLAGS} $@ $%
rm -f $%
.PRECIOUS: lib.a
SHAR_EOF
fi # end of overwriting check
if test -f 'README'
then
echo shar: will not over-write existing file "'README'"
else
cat << \SHAR_EOF > 'README'
INSTRUCTIONS FOR USING THE LMS PROGRAM VERSION 2
INTRODUCTION
This note provides details of a program written in standard FORTRAN 77 to fit
smoothed centile curves to reference data (Cole TJ, Green PJ. Smoothing
reference centile curves: the LMS method and penalized likelihood. Statistics
in Medicine 1992;11:1305-19).
The program models variable y as a semiparametric regression function of a
time-dependent variable t, so that the distribution of y changes smoothly when
plotted against t. The distribution is summarised by 3 time-varying spline
curves: the Box-Cox power that converts y to Normality (L), the mean (M), and
the coefficient of variation (S). The main application of the program is to
generate reference centile curves for growth charts.
The program saves the coefficients of the LMS curves to file, which then need
to be post-processed by other software. Some suitable Genstat procedures can be
obtained from Tim Cole (email address etc at the end of the file). With suitable
modifications to the subroutine plot.f (see PLOT in the reference section below)
the program will also display the fitted curves.
INSTALLATION
First unpack the shar file by typing 'sh <sharfile>' in a suitable directory (you
have probably already done this). Then, in the same directory, compile and load
the program by typing the single word 'make'. This creates the library lib.a
and the executable program lms. Ignore the warning message from the compiler.
RUNNING THE PROGRAM
Run the program as:
lms
or (say):
lms < inputfile > outputfile 2> errorfile
in the usual Unix notation. Commands are read from standard input, output goes
to standard output, and errors (on logical unit 0) go to standard error.
The program expects the data as (t,y) pairs, where t is the time (or age) and y
is the reference data. It first asks for the data file name. If you don't
remember it, you can type !ls which passes the ls command to the Bourne shell
to list the files in the current directory. Other system commands can be passed
in the same way, by preceding them with '!'.
The program then asks for the format to read the data, which can be in either of
two forms. If read with a FORTRAN format it needs to be in brackets, e.g. for
age in the first 5 columns and weight in the next 5, give the format as: (2f5.0)
Alternatively if the data fields are separated by tab, comma or space(s), the
file can be read in free format. In this case give the format as two numbers
specifying the t and y fields respectively. Typing help (or h) at the prompt
gives further information.
To see the file format, type ? to display the first 4 lines of the file (or pass
the head command to the Bourne shell using ! the shell escape).
The program then reads the datafile and summarises the data. The output is of
the form:
4 missing data
1 non-positive data
n: 4011 nc: 1657
Range of t: 1.050 20.990
Range of y: 6.240 150.250
where n is the number of distinct t,y pairs (excluding those with t or y
missing, or y non-positive), and nc is the number of distinct t values.
The program then displays a > prompt. Typing help or h lists the commands that
can be entered at this point, which are described in the reference section
below.
A typical run might consist of the following set of commands (# indicates a
comment):
# data file
lms.dat
# format
1 2
# set edf for L, M and S curves to 1, 5 and 1 respectively
# so L and S are estimated as constants, and M is a 5 edf spline curve
edf 1 5 1
# fit model
fit
# make M curve less smooth
edf ,6
# fit model again
fit
# fit simple L and S curves with 3 edf
edf 3,,3
fit
# make S curve less smooth
edf ,,4
fit
# save results of fitted curves to file
save
# rename output file
! mv lms.01 lms.out.364
# end
quit
The save command creates a file lms.01, and this is then renamed. So at the end
of the run there is a file lms.out.364 with the saved LMS curves.
A general strategy for choosing suitable values for the edf is given under EDF
and FIT in the reference section below.
If required the commands can be put into an input file, and the program run in
background:
lms < inputfile
The output can also be saved to a file if required:
lms < inputfile > outputfile
REFERENCE SECTION
Each command at the > prompt consists of a command word, in upper or lower
case, which may be followed by a space and one or more arguments. The command
word can be abbreviated to one or more letters. Space(s) or a comma act as
separators between arguments. Entering the command without arguments, or using
comma as a place-holder, leaves argument(s) unchanged. For example the EDF
command (see below) expects 3 arguments, so typing edf ,6 means that the
second takes the value 6, while the first and third arguments are unaffected.
RANGE [tlow] [thigh]
Discards data outside the specified range of ages t. If either age limit is
outside the current limit it is ignored. New values for n, nc and the ranges of
t and y are displayed. This can be useful to focus on a restricted range of the
data.
The command without arguments displays current values of n and nc, and the
ranges of t and y.
EDF [edfL] [edfM] [edfS]
Specifies the equivalent degrees of freedom (edf) for respectively the L curve,
the M curve and the S curve. Without any arguments the command displays the
current edf values.
An edf of 2 corresponds to a straight line, and higher values give a
progressively more complex curve. An edf of 1 means that a constant value for
L, M or S is to be estimated, and an edf of 0 means that a constant value is to
be specified. This is most useful for L, where setting it to 1 forces a Normal
distribution. If edf = 0 a prompt is given for the required value. Negative edf
values are faulted.
In deciding what edf to use, remember that usually
edf for M >= edf for S >= edf for L
The best values to use will depend on the sample size, but starting with values
between 4 and 6 for M, and 1 for L and S, is sensible.
The strategy is then to optimize the M curve edf, by increasing and/or
decreasing the edf by 1 until the change in penalized log likelihood is small
(this depends on the sample size, but a change of less than 2 units is not
significant - for large samples a bigger change is needed). The final decision
will depend on the appearance of the M curve.
If the shape of the M curve is complex it may be worth changing the value of
tscale to 1 or -1 to improve the fit. Refer to the tscale definition for more
details.
Once the M curve is fitted, the process is repeated for the S curve. In many
situations a constant value (i.e. 1 edf) is sufficient, and this should be
tried first. 3 edf should then be tried (rather than 2 edf, which forces a
linear trend on the S curve and can lead to silly values at the extremes of the
range). Higher edf values may be suitable for large and/or complex datasets.
Finally the L curve is fitted, similarly to the S curve. Fitting 1 edf shows the
overall Box-Cox power transformation needed, and the value may be close to 1 or
0. If it is, try fitting the corresponding power explicitly (i.e. with 0 edf),
for comparison. Then try increasing the edf in steps of 1, as for the M and S
curves, again avoiding 2 edf initially.
ALPHA [alphaL] [alphaM] [alphaS]
Specifies the smoothing parameters alpha to be used for each of the L, M and S
curves, where larger values correspond to more smoothing. Normally this command
is not needed, as the EDF command is easier to use. However it can be useful on
occasions, and is also quicker to fit.
The ALPHA command without arguments displays the current alpha values.
LIMITS [nitin] [nitout] [tolin] [tolout] [toledf] [zcutoff]
Modifies various parameters affecting the iteration process. nitin is the
maximum number of inner iterations (default 10), nitout ditto for outer
iterations (20), tolin is the criterion for inner convergence (0.001), tolout
ditto for outer convergence (0.01), and toledf ditto for fitting edf (0.05).
Zcutoff is the cutoff defining how extreme points need to be during the
fitting process to be displayed as outliers (default +/-5.0).
The command without arguments displays the current limits.
TSCALE [tv]
Transforms the t scale. Currently there are two alternatives, a log transform
if tv is negative, and an empirical transform based on the shape of the fitted
M curve if tv is positive.
The default value is 0, which leaves the t scale as is. A negative value
transforms t to ln(t+offset), where offset depends on the value of tv and
whether or not the minimum value of t is negative, zero or positive:
min(t) +ve, offset = 0
min(t) = 0, offset = -tv/100 * max(t)
min(t) -ve, offset = (tv-1)*min(t)
So a sensible value for tv, if the age scale is to be log transformed, is -1.
At the other extreme, setting tv to 1, a two-stage fitting process is used. The
M curve is first fitted using the untransformed t scale. The t scale is then
mapped to the shape of the M curve, so that the M curve plotted against the
transformed t scale is a straight line. The M curve is then refitted on the
transformed t scale, and the original t scale is restored. By allowing the two
stages, the fit of complex monotonic data is greatly improved. Note though that
if the data are non-monotonic, this process can lead to spurious wiggles in the
fitted curves at points of inflexion.
The rationale for the process, apart from improving the goodness of fit, is to
let the skewness and variance of the measurement change in proportion to the
velocity of the measurement, on the grounds that when the velocity is high,
e.g. in infancy, the skewness and variance are likely to change more rapidly
than at other ages.
A value of tv between 0 and 1 uses a mixture of the original and
velocity-mapped t scale for the second fit.
The TSCALE command with no arguments displays the current value of tv.
FIT
Fits the model using the specified values for edf and tv. Two output lines
summarise details of the fit:
alpha: 0.1830E+02 0.1979E+02 0.2832E+05 edf: 6.99 7.00 7.00
itout: 9 5283.00 4.88 67.80 6.56 5243.38 0.00
The 3 alpha and edf values are for the L, M and S curves respectively. If an edf
value is 0 or 1, meaning that the relevant curve is a constant, the alpha value
displayed is the curve's constant value.
itout is the number of outer iterations, and the next 5 values are components of
the penalized (log) likelihood. The first (5283.00) is the unpenalized
likelihood, the next 3 are the roughness penalties associated with the L, M and
S curves (i.e. the product of alpha and the summed squared second derivative
for each curve), and the fifth is the penalized likelihood, equal to the
likelihood less half the sum of the 3 roughness penalties. The final value on
the line is tv, the tscale setting (qv).
For comparing models, the penalized likelihood is what counts. The difference in
penalized likelihood between models can be treated (roughly) as 0.5 chi^2
distributed, so that a change of 1 edf is significant if the penalized
likelihood changes by more than 2 units. However for large samples this is a
tiny difference, and the best criterion for the choice of edf is a
judicious mix
of penalized likelihood and the appearance of the LMS curves.
MONITOR [on|off|yes|no]
Switches on or off the monitor flag, which monitors the progress of the fitting
process. monitor with no argument toggles its value.
With monitor on, the info printed at each outer iteration of the fit looks like
this:
itout: 2 -656.15 24.00 14.70 6.13 633.73 0.00 itin: 10
-4.75 7.00 6.62 0.4218E+03
-4.18 7.00 6.59 0.1426E+03
-3.44 7.00 6.58 0.2278E+05
The first line is the same as the second line of the fit summary, with the
addition of itin at the end, which is the number of inner iterations. The
unpenalized likelihood appears with negative sign until the inner loop has
converged. If the value of tv, the tscale setting, has been set positive, it is
displayed as 0 to start with, and changes to the specified value part-way
through the iteration process. If tv has been set negative it displays this
value throughout.
The next 3 lines are for L, M and S respectively. The first negative number is
the regression slope of log alpha on log edf (set to -5 initially), the next is
the target edf (7 here), the next is the current edf, and the last is the
current alpha value. If the regression slope becomes small (say -1 or so), this
is a sign that the iteration is going out of control.
If the target edf is 1, which fits a constant value rather than a spline curve
to the data, the relevant line is of the form 'fixed: 0.661'. Unlike edfs of 2
or more, this is fitted in a single pass. If the edf is 0, the line is not
displayed at all.
These 3 monitoring lines continue to appear until the edf are all within the
specified tolerance of the target edf (toledf). At this point the alphas are
frozen, and the outer iteration continues to convergence (i.e. when the change
in likelihood from one cycle to the next is within the tolerance tolout).
This monitoring output is not usually of interest, but can be useful when
non-convergence is a problem, e.g. if any of the regression slopes become small
or the likelihood falls having peaked. Another sign of non-convergence is the M
curve or the S curve becoming non-positive.
One common reason for failure to converge is outlying data. The program
displays points whose absolute standard deviation score (Z score) exceeds a
cutoff initially set to 5. This can be altered by resetting zcutoff in the
LIMITS command.
PLOT
Plots the data and/or fitted curves. As plotting is machine-specific the code
is supplied only as a dummy call. The arguments are:
plot(n,t,y,z,nc,tc,lamc,muc,sigc) where
n number of data points
t(n) t data
y(n) y data
z(n) standard deviation scores for the data points
nc number of distinct t values for each curve
tc(nc) distinct t values
lamc(nc) values for L curve
muc(nc) values for M curve
sigc(nc) values for S curve
Users can write a short plot subroutine using library calls to their local
graphics library to produce plots of the results.
SAVE [nsave]
Saves results to file for later analysis. The argument nsave defines what is
saved, made up as the sum of the following components:
1 n, nc, likelihoods, tv, alphas, edfs
2 tc(nc), lamc(nc), muc(nc), sigc(nc)
4 t(n), y(n), z(n)
The contents of these arrays are as described for the PLOT command. The file
name is of the form 'lms.nn', where the counter nn starts at 01 and increments
to a maximum of 99. The format of data in the file is as follows:
nsave = 1
line 1: n, nc, 5 likelihoods, tv (2i6,5f12.5,f10.4)
line 2: 3 alphas (3e12.4)
line 3: 3 edfs (3f8.3)
nsave = 2
lines 1...nc: nc values of arrays tc, lamc, muc, sigc (5e12.5)
nsave = 4
lines 1...n: n values of arrays t, y, z (2f12.5,f6.2,f12.5)
When nsave is 2 or 4, if tv is non-zero, the transformed t array tc2 is added
at the end of each line.
CI [ncycle]
Generates confidence intervals for the fitted LMS curves. A series of new sets
of data are simulated from the fitted LMS curves, assuming that the data are
normal equivalent deviates when converted to Z scores. Each dataset is then
fitted again from scratch, with the alphas set to the previously fitted values.
Several such datasets are generated (the number set by argument ncycle, default
25) and their fitted LMS curves written to the file 'cilms.nn' where nn is a
counter, initially 01. If cilms.01 exists, the counter is incremented. The
number of values from the LMS curves saved (nct) is the smaller of nc and 201.
Under normal order statistic theory, the extremes of the range of values of L,
M and S at each t across 25 simulated samples represents a 95% confidence
interval. For greater accuracy, ncycle can be increased and the 2nd or 3rd
largest and smallest values used to construct confidence intervals.
The format of the cilms.nn file is as follows:
original results
line 1: nct, ncycle (2i6)
line 2: n, nc, 5 likelihoods, tv (2i6,5f12.5,f10.4)
line 3: 3 alphas (3e12.4)
line 4: 3 edfs (3f8.3)
lines 5...nct+4: arrays tc,lamc,muc,sigc,tc2 (5e12.5)
followed by
cycle 1 of simulated results
line 1: n, nc, 5 likelihoods, tv (2i6,5f12.5,f10.4)
line 2: 3 alphas (3e12.4) [same as for original data]
line 3: 3 edfs (3f8.3)
lines 4...nct+3: arrays lamc,muc,sigc,tc2 (4e12.5)
followed by
cycle 2 of simulated results
etc
followed by
cycle ncycle of simulated results
Thus the file contains (ncycle+1)*(nct+3)+1 lines of data. Note that tc2 is
written only if tv is non-zero. To generate confidence intervals for the LMS
curves and centiles, another program is needed to process the file. A Genstat
procedure is available for this.
After fitting confidence intervals the program stops.
# [comment]
Lines beginning # are treated as comments and are ignored.
! [unix system command]
Lines beginning ! indicate a shell escape, and the rest of the line is passed
unchanged to the Bourne shell (sh). This is useful e.g. for listing files in a
directory (ls), looking at the first few lines of a file (head), or renaming a
file (mv).
QUIT
Stops the program.
--------
Tim Cole
7 Mar 96
MRC Dunn Nutrition Centre, Downhams Lane, Milton Road, Cambridge CB4 1XJ, UK
tel: +44 1223 426356 fax: +44 1223 426617 Tim.Cole@mrc-dunn.cam.ac.uk
SHAR_EOF
fi # end of overwriting check
if test -f 'ci.f'
then
echo shar: will not over-write existing file "'ci.f'"
else
cat << \SHAR_EOF > 'ci.f'
subroutine ci(n,y,alpha,edf,vlms,index,rindex,kt,nc,tc,yc,
& ppc,v,w1c,w2c,w3c,d1c,d2c,d3c,u1c,u2c,u3c,
& z,ab21,ab12,ab13,ab31,ab23,ab32,lamc,muc,sigc,
& tc2,lamc2,muc2,sigc2,
& itinx,itoutx,tolin,tolout,toledf,zcut,rep,ncycle,mon)
c.. calculate confidence intervals for spline curves by simulation
c.. Runs ncycle parametric-bootstrap cycles: data are simulated from
c the fitted L, M and S curves, refitted with iter, and the original
c and refitted curves (at up to 201 of the distinct ages) written to
c file cilms.nn for post-processing.
implicit double precision(a-h,o-z)
double precision lamc,muc,lamc2,muc2
logical mon
character*8 filename
dimension y(n),z(n),alpha(3),edf(3),vlms(3),rep(6),
& d1c(nc),d2c(nc),d3c(nc),w1c(nc),w2c(nc),w3c(nc),
& ab21(nc),ab12(nc),ab13(nc),ab31(nc),ab23(nc),ab32(nc),
& u1c(nc),u2c(nc),u3c(nc),ppc(nc,4),v(nc,7),
& yc(nc),tc(nc),lamc(nc),muc(nc),sigc(nc),
& tc2(nc),lamc2(nc),muc2(nc),sigc2(nc)
integer index(n),rindex(n),kt(nc)
data nfile/0/
if(ncycle.le.0) go to 8
c.. obtain subset of ages
np = 201
if(nc.lt.np) np = nc
lut(1) = 1
sum = 1.5
delta = dfloat(nc-1)/dfloat(np-1)
do 10 ip = 2,np
sum = sum + delta
c.. integer assignment truncates: roughly equally spaced subscripts
10 lut(ip) = sum
c.. save results to file
6 nfile = nfile + 1
write(filename,"('cilms.',i2.2)") nfile
open(8,file=filename,status='new',iostat=ierr,err=7)
write(8,100) np,ncycle
write(8,100) n,nc,rep,alpha,edf
do 22 ip = 1,np
j = lut(ip)
if(rep(6).eq.0.0) then
write(8,101) tc(j),lamc(j),muc(j),sigc(j)
else
write(8,101) tc(j),lamc(j),muc(j),sigc(j),tc2(j)
endif
22 continue
100 format(2i6,6f10.2/3e12.4/3f8.3)
101 format(5e12.5)
c.. do repeated simulations
icycle = 0
20 icycle = icycle + 1
if(icycle.gt.ncycle) go to 21
c.. save/restore copies of spline curves
if (icycle.eq.1) then
do 1 ic = 1,nc
lamc2(ic) = lamc(ic)
muc2(ic) = muc(ic)
1 sigc2(ic) = sigc(ic)
else
do 2 ic=1,nc
lamc(ic) = lamc2(ic)
muc(ic) = muc2(ic)
2 sigc(ic) = sigc2(ic)
endif
c.. simulate new data
call gauss(z,n)
do 3 i = 1,n
ic = rindex(i)
if(dabs(lamc(ic)).gt.1d-3) then
c.. Box-Cox back-transform; resample z until 1+L*S*z is positive
5 t1 = 1.0+lamc(ic)*sigc(ic)*z(i)
if(t1.le.0.0) then
write(*,'("*** z of",f5.1," resampled")') z(i)
call gauss(z(i),1)
go to 5
endif
y(i) = muc(ic)*t1**(1/lamc(ic))
else
c.. lambda effectively zero: log-normal back-transform
c (bug fix: muc was indexed by the data index i, not ic)
y(i) = muc(ic)*dexp(sigc(ic)*z(i))
endif
3 continue
c.. fit data for one cycle
if(mon) write(*,'("cycle:",i3)') icycle
call iter(n,y,z,u1c,u2c,u3c,alpha,edf,vlms,index,rindex,kt,
& nc,tc,yc,w1c,w2c,w3c,d1c,d2c,d3c,lamc,muc,sigc,ppc,v,
& ab21,ab12,ab13,ab31,ab23,ab32,tc2,
& itinx,itoutx,tolin,tolout,toledf,zcut,rep,.true.,ifault,mon)
c.. ifault 16: M or S curve went non-positive; dump and redo cycle
if(ifault.eq.16) then
call rsave(n,y,z,nc,tc,tc2,lamc,muc,sigc,
& alpha,edf,rep,rindex,4)
icycle = icycle - 1
go to 20
endif
c.. save spline curves to file
write(8,100) n,nc,rep,alpha,edf
do 4 ip = 1,np
j = lut(ip)
if(rep(6).eq.0.0) then
write(8,101) lamc(j),muc(j),sigc(j)
else
write(8,101) lamc(j),muc(j),sigc(j),tc2(j)
endif
4 continue
go to 20
21 close(8)
write(*,'("Saved to ",a)') filename
go to 8
c.. open failed: try next file number unless out of names
7 if(nfile.eq.99) then
write(0,"('File cilms.99 exists')")
else if(ierr.ne.1017) then
write(0,"('File error',i5)") ierr
else
go to 6
end if
8 return
end
SHAR_EOF
fi # end of overwriting check
if test -f 'cmprss.f'
then
echo shar: will not over-write existing file "'cmprss.f'"
else
cat << \SHAR_EOF > 'cmprss.f'
subroutine cmprss(n,y,nc,yc,index,kt)
c.. Condenses y (n values) onto the nc distinct times:
c yc(j) = mean of the y's sharing the j-th distinct time.
c index orders the data by time and kt holds the group counts,
c both as set up by grade.
implicit double precision(a-h,o-z)
dimension y(n),yc(nc),index(n),kt(nc)
ibase = 0
do 10 jc = 1,nc
total = 0.0
do 20 k = ibase+1,ibase+kt(jc)
20 total = total+y(index(k))
yc(jc) = total/kt(jc)
10 ibase = ibase+kt(jc)
return
end
SHAR_EOF
fi # end of overwriting check
if test -f 'commas.f'
then
echo shar: will not over-write existing file "'commas.f'"
else
cat << \SHAR_EOF > 'commas.f'
character*80 function commas(string,n)
c
c Appends n commas to string (so that list-directed reads can
c always find n fields) and replaces any tab characters by
c commas. NB: string itself is modified as a side effect.
c
character*(*) string
k = lnblnk(string)
c.. turn embedded tabs into commas
do 10 i = 1,k
if (ichar(string(i:i)).eq.9) string(i:i) = ','
10 continue
c.. append one comma per requested field
do 20 i = 1,n
if (k.eq.0) then
string = ','
else
string = string(1:k)//','
end if
20 k = lnblnk(string)
c
commas = string
c
return
end
SHAR_EOF
fi # end of overwriting check
if test -f 'csmspl.f'
then
echo shar: will not over-write existing file "'csmspl.f'"
else
cat << \SHAR_EOF > 'csmspl.f'
subroutine csmspl(x,y,dy,npts,alpha,v,ppc,ld,icay,ionly)
c.. Cubic smoothing spline PJG 13 May 1985
c tidied up 3 April 1991 and 4 January 1994
c based on smooth, setupq and chol1d
c from "A practical guide to splines" by Carl de Boor
c in turn based on Reinsch's smooth algorithm
c
c Constructs the cubic smoothing spline f to given data
c (x(i),y(i)), i=1,...,npts, minimizing
c s(f) + alpha * integrated squared 2nd derivative of f,
c where s(f) = sum( ((y(i)-f(x(i)))/dy(i))**2 , i=1,...,npts )
c
c****** i n p u t ******
c x(1..npts) data abscissae, assumed to be strictly increasing
c y(1..npts) corresponding data ordinates .
c dy(1..npts) estimate of uncertainty in data, assumed positive
c npts.....number of data points, assumed .gt. 1
c alpha......tuning constant (defined as in Green and Silverman)
c icay.......switch to control :
c 0 do everything
c 1 only y has changed since last call
c 2 only alpha has - - - - - - - - - - -
c 3 only y and alpha have - - - - - - -
c ***N.B. Use great care if setting icay nonzero:
c icay = 1 uses information left in all of v
c icay = 2 uses v(.,4:7) and ppc(.,2) AND requires all calls
c since last icay.ne.2 to have ionly.ne.0
c icay = 3 uses v(.,4:7)
c ionly...... -1 computes only ppc(1..n,1)
c +i - - - - only ppc(i,1)
c 0 - - - - all of ppc
c
c****** w o r k a r r a y s *****
c v.....of size (npts,7)
c ppc...of size (npts,4)
c ld....leading dimension of both ppc and v (.ge. npts)
c
c***** o u t p u t *****
c ppc(.,1).....contains the sequence of smoothed ordinates .
c ppc(i,j) = d**(j-1)f(x(i)), j=2,3,4, i=1,...,npts-1 , i.e., the
c first three derivatives of the smoothing spline f at the
c left end of each of the data intervals .
c (but note ionly parameter, see above).
c v(2:n-1,1:3) contains decomposition of (alphaQ'D^2Q+R)
c v(1:n-1,4) contains (x(i+1)-x(i),i=1:n-1)
c v(2:n-1,5:7) contains Q'D^2Q
c
c****** m e t h o d ******
c The matrices Q'D and Q'D^2Q are constructed
c from x and dy , as is the vector Q'Y.
c Then, for given alpha , the vector gamma is determined as
c the solution of the linear system
c (alpha Q'D^2Q + R) gamma = Q'Y
c From gamma , the smoothing spline f (for this choice of smoothing par-
c ameter alpha ) is obtained in the sense that
c f(x(.)) = Y - alpha D^2 Q gamma and
c (d**2)f(x(.)) = gamma
c The above description now uses the notation of Green and Silverman, chap. 2,
c except that D^2 is used instead of W^-1.
c N.B. This version leaves a differently scaled version of the
c decomposition in array v from that used by de Boor (see above): this has
c implications for other routines using that information.
implicit double precision(a-h,o-z)
dimension ppc(ld,4),dy(npts),v(ld,7),x(npts),y(npts)
npm1 = npts - 1
npm2 = npts - 2
athird = 1.0/3.0
asixth = 1.0/6.0
c Put h = x(.+1) - x(.) into v(.,4),
c Put the three bands of Q'D into v(.,1:3), and
c put the three bands of Q'D^2Q at and above the diagonal
c into v(.,5:7) .
c Here, Q is the tridiagonal matrix of order (npts-2,npts)
c with general row 1/h(i) , -1/h(i) - 1/h(i+1) , 1/h(i+1)
c and D is the diagonal matrix with general row dy(i) .
c (was de Boor's routine setupq ( x, dy, y, npts, v, qty(=ppc(1,2)) ))
if(icay.eq.0) then
v(1,4) = x(2) - x(1)
do 11 i = 2,npm1
v(i,4) = x(i+1) - x(i)
v(i,1) = dy(i-1)/v(i-1,4)
v(i,2) = - dy(i)/v(i,4) - dy(i)/v(i-1,4)
11 v(i,3) = dy(i+1)/v(i,4)
v(npts,1) = 0.
c.. diagonal band of Q'D^2Q
do 12 i = 2,npm1
12 v(i,5) = v(i,1)**2 + v(i,2)**2 + v(i,3)**2
c.. first superdiagonal band
do 13 i = 3,npm1
13 v(i-1,6) = v(i-1,2)*v(i,1) + v(i-1,3)*v(i,2)
v(npm1,6) = 0.
c.. second superdiagonal band
do 15 i = 4,npm1
15 v(i-2,7) = v(i-2,3)*v(i,1)
v(npm2,7) = 0.
v(npm1,7) = 0.
end if
c.. STEP 1: Construct Q'Y in ppc(.,2)
c (see G&S (2.7))
if(icay.ne.2) then
prev = (y(2) - y(1))/v(1,4)
do 17 i = 2,npm1
diff = (y(i+1)-y(i))/v(i,4)
ppc(i,2) = diff - prev
17 prev = diff
end if
c.. STEP 2: Construct the upper 3 diagonals of alpha Q'D^2Q + R
c in v(2:npts-1,1:3), then overwrite this with its
c Cholesky decomposition.
c (See G&S, pp. 20 and 41)
c (was de Boor's routine
c chol1d(p,v,qty(=ppc(.,2),npts,ncol(=1),u(=ppc(.,3)),qu(=ppc(.,1)) )
if(icay.ne.1) then
do 2 i = 2,npm1
v(i,1) = alpha*v(i,5) + athird*(v(i-1,4)+v(i,4))
v(i,2) = alpha*v(i,6) + asixth*v(i,4)
2 v(i,3) = alpha*v(i,7)
c.. Factorization
do 120 i = 2,npm2
ratio = v(i,2)/v(i,1)
v(i+1,1) = v(i+1,1) - ratio*v(i,2)
v(i+1,2) = v(i+1,2) - ratio*v(i,3)
v(i,2) = ratio
ratio = v(i,3)/v(i,1)
v(i+2,1) = v(i+2,1) - ratio*v(i,3)
120 v(i,3) = ratio
end if
c.. STEP 3: solve for gamma (which is put in ppc(.,3))
c Apply forward and backsubstitution to the right side
c Q'Y in ppc(.,2) to obtain the solution for gamma.
c (npts = 2 or 3: at most one interior point, solved directly)
if(npts.lt.4) then
ppc(1,3) = 0.
ppc(2,3) = ppc(2,2)/v(2,1)
ppc(3,3) = 0.
else
c.. Forward substitution
ppc(1,3) = 0.
v(1,3) = 0.
ppc(2,3) = ppc(2,2)
do 130 i = 2,npm2
130 ppc(i+1,3) = ppc(i+1,2) - v(i,2)*ppc(i,3) -
& v(i-1,3)*ppc(i-1,3)
c.. Back substitution
ppc(npts,3) = 0.
ppc(npm1,3) = ppc(npm1,3)/v(npm1,1)
do 40 i = npm2,2,-1
40 ppc(i,3) = ppc(i,3)/v(i,1)-ppc(i+1,3)*v(i,2)-
& ppc(i+2,3)*v(i,3)
end if
c.. STEP 4: find g and other derivatives
c.. Construct Q gamma in ppc(.,1)
prev = 0.0
do 50 i = 2,npts
ppc(i,1) = (ppc(i,3) - ppc(i-1,3))/v(i-1,4)
ppc(i-1,1) = ppc(i,1) - prev
50 prev = ppc(i,1)
ppc(npts,1) = -ppc(npts,1)
c.. fitted values: f = Y - alpha D^2 Q gamma
if(ionly.gt.0) then
ppc(ionly,1) = y(ionly) - alpha*dy(ionly)**2*ppc(ionly,1)
return
else
do 61 i = 1,npts
61 ppc(i,1) = y(i) - alpha*dy(i)**2*ppc(i,1)
if(ionly.lt.0) return
c.. first and third derivatives at the left end of each interval
do 63 i = 1,npm1
ppc(i,4) = (ppc(i+1,3)-ppc(i,3))/v(i,4)
63 ppc(i,2) = (ppc(i+1,1)-ppc(i,1))/v(i,4)
& - (ppc(i,3)+ppc(i,4)/3.*v(i,4))/2.*v(i,4)
end if
return
end
SHAR_EOF
fi # end of overwriting check
if test -f 'cspl.f'
then
echo shar: will not over-write existing file "'cspl.f'"
else
cat << \SHAR_EOF > 'cspl.f'
subroutine cspl(n,nc,tc,yc,wc,dc,lmsc,alpha,rough,
& edf,target,ionly,convin,kt,ppc,v)
c.. Smooths the nc values yc (means at the distinct times tc)
c into the curve lmsc, with inverse weights dc.
c target >= 2: cubic smoothing spline with parameter alpha;
c rough and edf are updated only when convin is true.
c 1 <= target < 2: constant curve = weighted mean (weights
c wc*kt), edf = 1, rough = 0.
c target < 1: lmsc left unchanged, edf = 0, rough = 0.
implicit double precision(a-h,o-z)
double precision lmsc
logical convin
dimension tc(nc),yc(nc),wc(nc),dc(nc),lmsc(nc),
& ppc(nc,4),v(nc,7)
integer kt(nc)
if(target.ge.2.0) then
c.. full spline fit; smoothed values come back in ppc(.,1)
call csmspl(tc,yc,dc,nc,alpha,v,ppc,nc,0,ionly)
do 10 jc = 1,nc
10 lmsc(jc) = ppc(jc,1)
if(convin) then
rough = rfcspl(tc,nc,ppc,nc)*alpha
edf = trace(nc,ppc,v,nc)
end if
else if(target.ge.1.0) then
c.. constant curve: weighted mean of yc
top = 0.0
bot = 0.0
do 20 jc = 1,nc
wt = wc(jc)*kt(jc)
top = top+yc(jc)*wt
20 bot = bot+wt
avg = top/bot
do 30 jc = 1,nc
30 lmsc(jc) = avg
rough = 0.0
edf = 1.0
else
rough = 0.0
edf = 0.0
end if
return
end
SHAR_EOF
fi # end of overwriting check
if test -f 'gauss.f'
then
echo shar: will not over-write existing file "'gauss.f'"
else
cat << \SHAR_EOF > 'gauss.f'
subroutine gauss(z,n)
c.. generates an n-vector of i.i.d. n(0,1) r.v.'s z
c using the Box-Muller transform on pairs of uniforms from
c rand(). Uniform draws of exactly zero are rejected, since
c alog(0.0) would otherwise give -infinity.
double precision z(n)
n1 = n-1
do 1 i = 1,n1,2
2 u1 = rand(0)
if(u1.le.0.0) go to 2
u = sqrt(-2.0*alog(u1))
v = 6.2831853*rand(0)
z(i) = u*sin(v)
1 z(i+1) = u*cos(v)
c.. n odd: one more deviate needed
if(mod(n,2).eq.0) return
3 u1 = rand(0)
if(u1.le.0.0) go to 3
z(n) = sqrt(-2.0*alog(u1))*sin(6.2831853*rand(0))
return
end
SHAR_EOF
fi # end of overwriting check
if test -f 'grade.f'
then
echo shar: will not over-write existing file "'grade.f'"
else
cat << \SHAR_EOF > 'grade.f'
subroutine grade(n,t,nc,tc,index,rindex,kt)
c sorts array index(i), i = 1 to n
c according to values of t(index(i)), so
c obtains index(i) = index of i-th smallest t.
c Also returns the nc distinct sorted times in tc, their
c multiplicities in kt, and for each datum i the rank
c rindex(i) of its distinct time.
implicit double precision(a-h,o-z)
integer rindex
dimension t(n),tc(nc),index(n),rindex(n),kt(nc)
do 4 i = 1,n
4 index(i) = i
c.. shell sort of index on t values, gap sequence mesh -> (mesh+1)/3
mesh = n
1 mesh = (mesh+1)/3
ist = mesh+1
do 3 i1 = ist,n
indexi = index(i1)
ti = t(indexi)
inow = i1
c.. gapped insertion pass: shift larger elements up
do 2 i2 = ist,i1,mesh
inext = inow-mesh
indexn = index(inext)
if(ti.ge.t(indexn)) go to 3
index(inow) = indexn
2 inow = inext
3 index(inow) = indexi
if(mesh.gt.1) go to 1
c.. copied from cmprss
c obtains j = rindex(i) = j-th distinct time for i-th data
c and kt(j) = count for j-th distinct time
tsave = t(index(1))
nc = 1
tc(1) = tsave
kt(1) = 1
rindex(index(1)) = 1
do 5 i = 2,n
j = index(i)
if(t(j).ne.tsave) then
c.. new distinct time encountered
tsave = t(j)
nc = nc+1
tc(nc) = tsave
kt(nc) = 1
else
kt(nc) = kt(nc)+1
end if
rindex(j) = nc
5 continue
return
end
SHAR_EOF
fi # end of overwriting check
if test -f 'hdeh.f'
then
echo shar: will not over-write existing file "'hdeh.f'"
else
cat << \SHAR_EOF > 'hdeh.f'
subroutine hdeh(a,lda,n,b1,w)
c.. Overwrites the rational Cholesky factors of a banded symmetric
c matrix (held in a) with the corresponding band of its inverse,
c working backwards from row n.
implicit double precision(a-h,o-z)
integer b,b1
dimension a(lda,b1),w(b1)
c
c near-diagonal elements of inverse of a banded
c non-negative definite matrix a = ldl'
c algorithm of Hutchinson and de Hoog (numer. math., 1985?)
c follows rational cholesky decomposition (e.g. by rchb)
c storage:
c a and hence l is banded
c (ainverse)ij = (ainverse)ji held in a(i,j-i+1) j=i to i+b
c lii = 1
c lji held in a(i,j-i+1) j = i+1 to i+b
c dii held in a(i,1)
c so output overwrites input.
c w is a vector of workspace of length b
c
c.. b = half-bandwidth; b1 = b+1 columns of band storage
b = b1-1
a(n,1) = 1.0/a(n,1)
c.. rows n-1 down to 1; rows below have already been inverted
do 1 ii = 2,n
i = n+1-ii
a(i,1) = 1.0/a(i,1)
m = min0(b,n-i)
do 2 j = 1,m
sum = 0.0
do 3 k = 1,j
3 sum = sum+a(i,k+1)*a(i+k,j-k+1)
if(j.eq.m) go to 5
do 6 k = j+1,m
6 sum = sum+a(i,k+1)*a(i+j,k-j+1)
5 a(i,1) = a(i,1)+a(i,j+1)*sum
2 w(j) = -sum
c.. copy the new off-diagonal inverse elements over the L factors
c (held in w until now so the old factors stay available above)
do 4 j = 1,m
4 a(i,j+1) = w(j)
1 continue
c
return
end
SHAR_EOF
fi # end of overwriting check
if test -f 'init.f'
then
echo shar: will not over-write existing file "'init.f'"
else
cat << \SHAR_EOF > 'init.f'
subroutine init(n,y,ywk,nc,tc,yc,dc,alpha,lamc,muc,sigc,
& vlms,index,rindex,kt,ppc,v,mon)
c.. Initialise the L, M and S curves before the first fit:
c L is set identically to vlms(1); M is obtained by smoothing
c the power-transformed data (power vlms(1), log if 0); S by
c smoothing the squared Box-Cox residuals about M. If the
c smoothed M curve goes non-positive, the transform falls back
c to the log (vlms(1) is reset to 0 and the whole setup redone).
implicit double precision(a-h,o-z)
double precision lamc,muc,lt
logical mon
dimension y(n),ywk(n),tc(nc),yc(nc),dc(nc),alpha(3),edf(3),
& lamc(nc),muc(nc),sigc(nc),ppc(nc,4),v(nc,7),vlms(3)
integer index(n),rindex(n),kt(nc)
c.. NB edf is local here; used only for the monitor line at the end
edf(1) = 1.0
c.. transform y by the power lt (log when lt = 0; y**lt computed
c as exp(lt*log(y)) otherwise)
42 lt = vlms(1)
do 40 i = 1,n
yt = y(i)
if(lt.eq.1.0) then
ywk(i) = yt
else
yt = dlog(yt)
if(lt.eq.0.0) then
ywk(i) = yt
else
ywk(i) = dexp(yt*lt)
endif
endif
40 continue
c.. inverse weights 1/sqrt(count) at each distinct time
do 46 ic = 1,nc
46 dc(ic) = 1.0/dsqrt(dfloat(kt(ic)))
c.. M curve: smooth the means of the transformed y, back-transform
call cmprss(n,ywk,nc,yc,index,kt)
call cspl(n,nc,tc,yc,dc,dc,muc,alpha(2),rough,
& edf(2),3.0,-1,.true.,kt,ppc,v)
do 49 ic = 1,nc
lamc(ic) = lt
if(lt.eq.0.0) then
muc(ic) = dexp(muc(ic))
else
if(muc(ic).le.0.0) then
c.. cannot back-transform a non-positive value: restart with log
vlms(1) = 0.0
go to 42
else if(lt.ne.1.0) then
muc(ic) = muc(ic)**(1/lt)
endif
endif
49 continue
c.. S curve: smooth the squared Box-Cox residuals about M
do 47 i = 1,n
yt = y(i)/muc(rindex(i))
if(lt.eq.1.0) then
yt = yt-1.0
else if(lt.eq.0.0) then
yt = dlog(yt)
else if(lt.eq.-1.0) then
yt = 1.0-1.0/yt
else
yt = (yt**lt-1.0)/lt
endif
47 ywk(i) = yt**2
call cmprss(n,ywk,nc,yc,index,kt)
call cspl(n,nc,tc,yc,dc,dc,sigc,alpha(3),rough,
& edf(3),3.0,-1,.true.,kt,ppc,v)
c.. guard: the smoothed variance can dip negative
do 50 ic = 1,nc
if(sigc(ic).gt.0.0) then
sigc(ic) = dsqrt(sigc(ic))
else
sigc(ic) = dsqrt(-sigc(ic))
end if
50 continue
if(mon) write(*,'("alpha:",3e11.4," edf:",3f6.2)') alpha,edf
return
end
SHAR_EOF
fi # end of overwriting check
if test -f 'iter.f'
then
echo shar: will not over-write existing file "'iter.f'"
else
cat << \SHAR_EOF > 'iter.f'
subroutine iter(n,y,z,u1c,u2c,u3c,alpha,edf,vlms,index,rindex,
& kt,nc,tc,yc,w1c,w2c,w3c,d1c,d2c,d3c,lamc,muc,sigc,ppc,v,
& ab21,ab12,ab13,ab31,ab23,ab32,tc2,
& itinx,itoutx,tolin,tolout,toledf,zcut,rep,fxalph,ifault,mon)
c.. One call performs up to itoutx outer cycles of the LMS fitting
c algorithm: each cycle evaluates scores and likelihood, forms
c working weights/responses, then runs up to itinx inner
c backfitting passes over the three spline smoothers (L, M, S).
c When fxalph is false the alphas are adjusted (via upd) until
c the edfs hit their targets within toledf.
c On exit: ifault = 0 converged, 4 not converged, 16 M or S
c curve went non-positive; z holds the z-scores; rep(1:5) holds
c likelihood/roughness components and rep(6) the t power.
implicit double precision(a-h,o-z)
double precision lambda,mu,lamc,muc,lik
logical edfok,convin,strchd,nonpos,fxalph,mon
dimension y(n),z(n),u1c(nc),u2c(nc),u3c(nc),
& tc(nc),yc(nc),w1c(nc),w2c(nc),w3c(nc),d1c(nc),d2c(nc),
& d3c(nc),lamc(nc),muc(nc),sigc(nc),ppc(nc,4),v(nc,7),
& ab21(nc),ab12(nc),ab13(nc),ab31(nc),ab23(nc),ab32(nc),
& tc2(nc),alpha(3),edf(3),vlms(3),alphat(3),rep(6)
integer index(n),rindex(n),kt(nc)
c.. Set up for confidence intervals
ifault = 0
edfok = fxalph
nedfok = 0
c.. velt < 0: t scale was stretched previously, undo it first
velt = rep(6)
if(velt.lt.0.0) then
call ustrch(n,nc,tc,tc2,rindex,velt)
end if
strchd = velt.le.0.0
if(.not.strchd) rep(6) = 0.0
alphal = alpha(1)
alpham = alpha(2)
alphas = alpha(3)
targtl = edf(1)
targtm = edf(2)
targts = edf(3)
istatl = 0
istatm = 0
istats = 0
c.. Main loop
plik = 0.0
roughl = 0.0
roughm = 0.0
roughs = 0.0
do 50 itout = 1,itoutx
c.. Check for positive M and S curve
c (offending runs of distinct times are reported; negprv marks
c the start of the current run)
nonpos = .false.
negprv = 0
do 5 ic = 1,nc
if(muc(ic).le.0.0.or.sigc(ic).le.0.0) then
if(vlms(1).ne.0.0) then
ifault = 16
go to 52
else if(.not.nonpos) then
write(0,'("M or S curve non-positive"/
& "______ t __ lambda ______ mu ___ sigma")')
nonpos = .true.
end if
if(negprv.eq.0) then
write(0,'(f8.3,3e10.3)') tc(ic),lamc(ic),muc(ic),sigc(ic)
negprv = ic
end if
else
if(negprv.ne.0) then
jc = ic-1
write(0,'(f8.3,3e10.3)') tc(jc),lamc(jc),muc(jc),sigc(jc)
write(0,'("***",i6," distinct times")') ic-negprv
negprv = 0
end if
end if
5 continue
if(negprv.ne.0) then
write(0,'(f8.3,3e10.3)') tc(nc),lamc(nc),muc(nc),sigc(nc)
write(0,'("***",i6," distinct times")') nc+1-negprv
end if
if(nonpos) then
ifault = 16
go to 52
end if
c.. Calculate scores and likelihood
plikp = plik
lik = 0.0
iptr = 0
do 7 ic = 1,nc
u1 = 0.0
u2 = 0.0
u3 = 0.0
lambda = lamc(ic)
mu = muc(ic)
sigma = sigc(ic)
do 1 i = iptr+1,iptr+kt(ic)
sz0 = dlog(y(index(i))/mu)
if(dabs(lambda).gt.1d-3) then
zt = (dexp(sz0*lambda)-1.0)/(sigma*lambda)
z2 = zt*zt
u1 = u1+sz0*(1.0-z2)+zt/lambda*(zt-sz0/sigma)
else
c.. small lambda: use the limiting (log) forms
zt = sz0/sigma
z2 = zt*zt
u1 = u1+sz0*(1.0-0.5*z2)
end if
u2 = u2+((z2-1.0)*lambda+zt/sigma)/mu
u3 = u3+(z2-1.0)/sigma
z(index(i)) = zt
c.. flag outlying z-scores
if(abs(zt).ge.zcut)
& write(*,'("line",i5," t =",f8.3," y =",f8.3," z =",f5.1)')
& index(i),tc(ic),y(index(i)),zt
1 lik = lik-0.5*z2+lambda*sz0-dlog(sigma)
u1c(ic) = u1/kt(ic)
u2c(ic) = u2/kt(ic)
u3c(ic) = u3/kt(ic)
7 iptr = iptr+kt(ic)
c.. penalised likelihood
plik = lik-0.5*(roughl+roughm+roughs)
rep(1) = lik
c.. likelihood reported negative while edfs are still off target
if(.not.edfok) rep(1) = -rep(1)
rep(2) = roughl
rep(3) = roughm
rep(4) = roughs
rep(5) = plik
if(mon) write(*,'("itout:",i3,5f10.2,f6.2,$)') itout,rep
c.. Check for convergence and transform t scale if requested
if(nedfok.gt.1) then
if(strchd) then
if(abs(plik-plikp).lt.tolout) then
if(mon) write(*,'(" converged")')
go to 51
end if
else
call strch(n,nc,tc,tc2,muc,rindex,velt)
strchd = .true.
rep(6) = velt
edfok = fxalph
nedfok = 0
end if
end if
c.. Calculate weights
do 2 ic = 1,nc
w1c(ic) = 1.75*sigc(ic)**2
w2c(ic) = (2.0*lamc(ic)**2+1.0/sigc(ic)**2)/muc(ic)**2
2 w3c(ic) = 2.0/sigc(ic)**2
c.. Calculate inverse weights
do 6 ic = 1,nc
d1c(ic) = 1.0/dsqrt(w1c(ic)*kt(ic))
d2c(ic) = 1.0/dsqrt(w2c(ic)*kt(ic))
6 d3c(ic) = 1.0/dsqrt(w3c(ic)*kt(ic))
c.. Calculate multipliers (cross-terms between the three curves)
do 3 ic = 1,nc
t1 = -0.5/muc(ic)
ab12(ic) = t1/w1c(ic)
ab21(ic) = t1/w2c(ic)
t1 = lamc(ic)*sigc(ic)
ab13(ic) = t1/w1c(ic)
ab31(ic) = t1/w3c(ic)
t1 = 2.0*lamc(ic)/(sigc(ic)*muc(ic))
ab23(ic) = t1/w2c(ic)
3 ab32(ic) = t1/w3c(ic)
c.. Calculate working responses
do 4 ic = 1,nc
u1c(ic) = u1c(ic)/w1c(ic)
& +lamc(ic)+ab12(ic)*muc(ic)+ab13(ic)*sigc(ic)
u2c(ic) = u2c(ic)/w2c(ic)
& +muc(ic)+ab21(ic)*lamc(ic)+ab23(ic)*sigc(ic)
4 u3c(ic) = u3c(ic)/w3c(ic)
& +sigc(ic)+ab31(ic)*lamc(ic)+ab32(ic)*muc(ic)
c.. Cycle over smoothers
c (ionly = -1 computes fitted values only; on the final pass
c convin is set so cspl also returns rough and edf)
convin = .false.
ionly = -1
do 55 itin = 1,itinx
convin = convin.or.itin.eq.itinx
if(convin) ionly = 0
c.. Lambda
do 61 ic = 1,nc
61 yc(ic) = u1c(ic)-ab12(ic)*muc(ic)-ab13(ic)*sigc(ic)
call cspl(n,nc,tc,yc,w1c,d1c,lamc,alphal,roughl,
& edfl,targtl,ionly,convin,kt,ppc,v)
c.. Mu
do 64 ic = 1,nc
64 yc(ic) = u2c(ic)-ab21(ic)*lamc(ic)-ab23(ic)*sigc(ic)
call cspl(n,nc,tc,yc,w2c,d2c,muc,alpham,roughm,
& edfm,targtm,ionly,convin,kt,ppc,v)
c.. Sigma (smoothed into yc, copied to sigc below so the change
c in sigc can be measured)
do 67 ic = 1,nc
67 yc(ic) = u3c(ic)-ab31(ic)*lamc(ic)-ab32(ic)*muc(ic)
call cspl(n,nc,tc,yc,w3c,d3c,yc,alphas,roughs,
& edfs,targts,ionly,convin,kt,ppc,v)
c.. Check for convergence of inner loop
change = 0.0
do 70 ic = 1,nc
change = change+abs(sigc(ic)-yc(ic))*kt(ic)
70 sigc(ic) = yc(ic)
if(convin) go to 60
convin = change.lt.tolin
55 continue
c.. On convergence of inner loop, save edf
c and update alpha's if needed
60 if(mon) write(*,'(" itin:",i3)') itin
edf(1) = edfl
edf(2) = edfm
edf(3) = edfs
if(.not.edfok) then
call upd(istatl,factrl,targtl,apl,epl,alphal,edfl,lamc(1),mon)
call upd(istatm,factrm,targtm,apm,epm,alpham,edfm,muc(1),mon)
call upd(istats,factrs,targts,aps,eps,alphas,edfs,sigc(1),mon)
edfok =
& (targtl.lt.2.0.or.abs(targtl-edfl).lt.toledf).and.
& (targtm.lt.2.0.or.abs(targtm-edfm).lt.toledf).and.
& (targts.lt.2.0.or.abs(targts-edfs).lt.toledf)
if(.not.edfok) then
alpha(1) = alphal
alpha(2) = alpham
alpha(3) = alphas
end if
end if
if(edfok) nedfok = nedfok + 1
c.. End of main loop
50 continue
c.. No convergence
ifault = 4
c.. report alphas, substituting the (constant) curve value for
c curves with edf < 2
51 do 53 i = 1,3
53 alphat(i) = alpha(i)
if(edf(1).lt.2.0) alphat(1) = lamc(1)
if(edf(2).lt.2.0) alphat(2) = muc(1)
if(edf(3).lt.2.0) alphat(3) = sigc(1)
write(*,'("alpha:",3e11.4," edf:",3f6.2)') alphat,edf
write(*,'("itout:",i3,5f10.2,f6.2)') itout,rep
c.. Restore t scale if necessary
52 call ustrch(n,nc,tc,tc2,rindex,rep(6))
rep(6) = velt
return
end
SHAR_EOF
fi # end of overwriting check
if test -f 'lms.f'
then
echo shar: will not over-write existing file "'lms.f'"
else
cat << \SHAR_EOF > 'lms.f'
c.. Program to fit smoothed centile curves, according to
c method of T. J. Cole and P. J. Green (Statistics in Medicine 1992)
c.. Set nx to number of points, and ncx to number of distinct time points
parameter
& (nx=21000,ncx=9000,nncx=35,lnwork=3*nx,lwork=nx+nncx*ncx)
implicit double precision(a-h,o-z)
character rule*10,fmt*80,file*80,commas*80,image*300,field(30)*12
logical free,eof
dimension y(nx),t(nx),nwork(lnwork),work(lwork)
data nfield/30/,rule/'123456789 '/
write(*,'("Type h for help")')
c.. Read data file and format
46 write(*,'("Enter file name: ",$)')
41 read(*,'(a)',end=101,err=101) file
if(match(file,'help')) then
write(*,"('Enter name of file containing the data')")
go to 41
else if(match('#',file)) then
go to 46
else if(match('!',file)) then
call system(file(2:))
go to 46
end if
open(4,file=file,status='old',iostat=ierr,err=102)
47 write(*,'("Enter file format",$)')
40 write(*,'(": ",$)')
read(*,'(a)',end=101,err=101) fmt
nfmt = lnblnk(fmt)
if(match('#',fmt)) then
go to 47
else if(match('!',fmt)) then
call system(fmt(2:))
go to 47
else if(fmt(1:1).eq.'?') then
c.. show the first four lines of the file with a column ruler
len = 1
do 45 i = 1,4
read(4,'(a)') image
len = max(len,lnblnk(image))
45 write(*,'(a)') image(1:lnblnk(image))
len = (len-1)/10+1
write(*,'(8a)') (rule,i=1,len)
rewind 4
go to 40
else if(fmt(1:1).ge.'0'.and.fmt(1:1).le.'9') then
free = .true.
read(commas(fmt,2),*) nft,nfy
nfty = max(nft,nfy)
if(nfty.gt.nfield) then
write(0,'("only space for",i3," fields")') nfield
go to 40
end if
else if(fmt(1:1).eq.'(') then
free = .false.
else
write(*,'(
& "Enter format for (t,y) pairs, assuming one pair per line"/
& "if fixed format give FORTRAN format statement WITH brackets"/
& "if free format give two field numbers"/
& "Examples:"/
& "(t4,f7,t15,f3.2) means t in cols 4-10, and"/
& " y in cols 15-17 with 2 decimal places"/
& "2 5 means t in field 2, y in field 5"/
& "field separators are space(s), comma and tab"//
& "Type ? to see first four lines of file")')
go to 40
end if
c.. Read data file
c Values are read into the scalars tt,yy and only stored when
c valid and i <= nx (bug fix: reading straight into t(i),y(i)
c overran the arrays before the "Not enough memory" test below
c could ever fire). Surplus rows are still counted so that the
c test does fire.
i = 0
line = 0
nerr = 0
nought = 0
42 i = i+1
line = line + 1
if(free) then
call rdfree(4,image,nfty,field,eof)
if(eof) go to 43
if(field(nft).eq.' '.or.field(nfy).eq.' ') go to 44
read(field(nft),*,err=44,iostat=ierr) tt
read(field(nfy),*,err=44,iostat=ierr) yy
else
read(4,fmt,end=43,err=44,iostat=ierr) tt,yy
endif
if (yy.le.0.0) then
nought = nought + 1
i = i-1
else if (i.le.nx) then
t(i) = tt
y(i) = yy
endif
go to 42
44 nerr = nerr + 1
i = i-1
go to 42
43 n = i-1
if(nerr.gt.0) write(*,'(i5," missing data")') nerr
if(nought.gt.0) write(*,'(i5," non-positive data")') nought
if(n.gt.nx) then
write(0,'("Not enough memory: n =",i6," > nx =",i6)') n,nx
else if(n.lt.10) then
write(0,'("Not enough data: n =",i2)') n
else
call loopx(n,t,y,nwork,(lwork-n)/nncx,work)
end if
go to 101
102 if(lnblnk(file).eq.0) then
go to 101
else if(ierr.eq.1018) then
write(0,"('File not found')")
go to 46
else
write(0,"('File error ',i4)") ierr
end if
101 stop
end
SHAR_EOF
fi # end of overwriting check
if test -f 'loop.f'
then
echo shar: will not over-write existing file "'loop.f'"
else
cat << \SHAR_EOF > 'loop.f'
subroutine loop(n,t,y,z,index,rindex,kt,nc,tc,yc,
& lamc,muc,sigc,tc2,lamc2,muc2,sigc2,
& u1c,u2c,u3c,w1c,w2c,w3c,d1c,d2c,d3c,
& ab21,ab12,ab13,ab31,ab23,ab32,ppc,v)
c.. Interactive command loop: reads one command word (plus optional
c parameters) per line and dispatches on it. Commands handled:
c range, monitor, edf, alpha, tscale, fit, plot, save, ci,
c limits, quit, help, ! (shell escape) and # (comment).
implicit double precision(a-h,o-z)
double precision lamc,muc,lamc2,muc2
character commas*80,line*40,cmd*40,word*40,lms*3
logical first,mon,fxalph
dimension t(n),y(n),z(n),u1c(nc),u2c(nc),u3c(nc),
& d1c(nc),d2c(nc),d3c(nc),w1c(nc),w2c(nc),w3c(nc),
& ab21(nc),ab12(nc),ab13(nc),ab31(nc),ab23(nc),ab32(nc),
& tc(nc),yc(nc),lamc(nc),muc(nc),sigc(nc),
& tc2(nc),lamc2(nc),muc2(nc),sigc2(nc),vlms(3),
& ppc(nc,4),v(nc,7),alpha(3),alphat(3),edf(3),edfp(3),rep(6)
integer index(n),rindex(n),kt(nc)
c.. ifault = 99 marks "no model fitted yet"
data edf/3*5.2/,velt/0.0/,first,mon,fxalph/.true.,2*.false./
data itinx,itoutx,listrs,ifault,ncycle/10,20,3,99,25/,zcut/5.0/
data tolin,tolout,toledf/0.001,0.01,0.05/,rep(6)/0.0/
data word,cmd,lms/'range','','LMS'/,vlms/1.0,2*0.0/
c.. first time through behaves as if "range" had been typed
go to 100
111 write(*,'("> ",$)')
read(*,'(a)',end=108) line
call parse(line,word,cmd)
100 if(match(word,'range')) then
c.. Obtain starting values for smoothing parameters
call range(n,t,y,nc,tc,index,rindex,kt,alpha,fxalph,cmd)
else if(match(word,'monitor')) then
c.. Toggle monitoring
if(match(cmd,'on').or.match(cmd,'yes')) then
mon = .true.
else if(match(cmd,'off').or.match(cmd,'no')) then
mon = .false.
else if(lnblnk(cmd).eq.0) then
mon = .not.mon
if(mon) then
write(*,"('Monitoring on')")
else
write(*,"('Monitoring off')")
end if
else
write(0,"('Either yes, no, on or off')")
end if
else if(match(word,'edf')) then
c.. Define edf and adjust alpha accordingly
c (no parameters: just report the current values)
if(cmd.eq.'') then
write(*,'(3f7.2)') edf
do 110 i = 1,3
if (edf(i).eq.0.0) then
write(*,'(a1," value: ",e11.4)') lms(i:i),vlms(i)
end if
110 continue
else
c.. take the edf**5 scaling out of alpha before the change
c and put the new scaling back in afterwards
do 101 i = 1,3
if (edf(i).ge.2.0) then
alpha(i) = alpha(i)*edf(i)**5
edfp(i) = edf(i)
end if
101 continue
read(commas(cmd,3),*,err=112) edf
fxalph=.false.
do 102 i = 1,3
if (edf(i).ge.2.0) then
alpha(i) = alpha(i)/edf(i)**5
if (edf(i).eq.2..and.edfp(i).gt.2.001) alpha(i) = alpha(i)*1e6
if (edf(i).gt.2..and.edfp(i).le.2.001) alpha(i) = alpha(i)/1e6
else if(edf(i).lt.2.0.and.edf(i).ge.1.0) then
c.. edf 1: constant curve
edf(i) = 1.0
else if(edf(i).lt.1.0.and.edf(i).ge.0.0) then
c.. edf 0: curve fixed at a user-supplied value
edf(i) = 0.0
write(*,'("Enter ",a1," value: ",$)') lms(i:i)
read(*,'(a)',end=111) line
read(commas(line,1),*,err=112) vlms(i)
if(i.eq.1) then
do 103 ic=1,nc
103 lamc(ic) = vlms(i)
else if(i.eq.2) then
if(vlms(i).le.0.0) then
write(0,'("Must be positive")')
else
do 104 ic=1,nc
104 muc(ic) = vlms(i)
endif
else if(i.eq.3) then
if(vlms(i).le.0.0) then
write(0,'("Must be positive")')
else
do 105 ic=1,nc
105 sigc(ic) = vlms(i)
endif
endif
else if(edf(i).lt.0.0) then
write(0,'("Must be non-negative")')
end if
102 continue
end if
else if(match(word,'alpha')) then
c.. Define alpha
c (no parameters: just report; all three must be positive)
if(cmd.eq.'') then
write(*,'(3e11.4)') alpha
else
do 109 i = 1,3
109 alphat(i) = alpha(i)
read(commas(cmd,3),*,err=112) alphat
do 106 i = 1,3
if(alphat(i).gt.0.0) go to 106
write(0,'("Must be positive")')
go to 111
106 continue
do 107 i = 1,3
107 alpha(i) = alphat(i)
fxalph = .true.
end if
else if(match(word,'tscale')) then
c.. Change t scale, as log transform (-1) or inverse velocity transform (1)
if(cmd.eq.'') then
write(*,'(f7.2)') velt
else
veltp = velt
read(commas(cmd,1),*,err=112) velt
if (velt.lt.-1.0.or.velt.gt.1.0) then
write(0,'("Must be between -1 and 1")')
else
rep(6) = velt
end if
endif
else if(match(word,'fit')) then
c.. Fit model
if(first) then
first = .false.
c.. Initialise: set lambda identically 1 (or 0), and mu and sigma
c by simple smoothing of data.
call init(n,y,z,nc,tc,yc,d2c,alpha,lamc,muc,sigc,vlms,
& index,rindex,kt,ppc,v,mon)
end if
c.. apply a changed negative t transform before fitting
if(velt.lt.0.0.and.velt.ne.veltp) then
veltp = velt
call strch(n,nc,tc,tc2,muc,rindex,velt)
call ustrch(n,nc,tc,tc2,rindex,velt)
end if
call iter(n,y,z,u1c,u2c,u3c,alpha,edf,vlms,index,rindex,kt,
& nc,tc,yc,w1c,w2c,w3c,d1c,d2c,d3c,lamc,muc,sigc,ppc,v,
& ab21,ab12,ab13,ab31,ab23,ab32,tc2,
& itinx,itoutx,tolin,tolout,toledf,zcut,rep,fxalph,ifault,mon)
c.. M or S non-positive: retry once with the log transform
if(ifault.eq.16) then
first = .true.
if(vlms(1).ne.0.0) then
word = 'fit'
vlms(1) = 0.0
go to 100
end if
end if
else if(match(word,'plot')) then
c.. Plot results
if(ifault.eq.99) go to 113
call plot(n,t,y,z,nc,tc,lamc,muc,sigc,v)
else if(match(word,'save')) then
c.. Save results
if(ifault.eq.99) go to 113
read(commas(cmd,1),*,err=112) listrs
call rsave(n,y,z,nc,tc,tc2,lamc,muc,sigc,
& alpha,edf,rep,rindex,listrs)
else if(match(word,'ci')) then
c.. Obtain confidence intervals
if(ifault.eq.99) go to 113
read(commas(cmd,1),*,err=112) ncycle
if(ncycle.gt.0) then
call ci(n,y,alpha,edf,vlms,index,rindex,kt,nc,tc,yc,
& ppc,v,w1c,w2c,w3c,d1c,d2c,d3c,u1c,u2c,u3c,
& z,ab21,ab12,ab13,ab31,ab23,ab32,lamc,muc,sigc,
& tc2,lamc2,muc2,sigc2,
& itinx,itoutx,tolin,tolout,toledf,zcut,rep,ncycle,mon)
c.. program stops after confidence intervals
go to 108
endif
else if(match(word,'limits')) then
c.. Reset limits
read(commas(cmd,6),*,err=112)
& itinx,itoutx,tolin,tolout,toledf,zcut
zcut = abs(zcut)
write(*,'(
& "limits: itin itout tolin tolout toledf zcutoff"/
& 7x,2i6,3e10.2,f9.2)') itinx,itoutx,tolin,tolout,toledf,zcut
else if(match(word,'quit')) then
c.. Quit
go to 108
else if(match(word,'help')) then
c.. Help
write(*,"('Available commands are:'
& /' alpha ci edf fit help limits !'
& /' monitor plot quit save range tscale #'
& /'or their abbreviations')")
else if(match('!',word)) then
c.. Shell escape
call system(word(2:)//' '//cmd)
else if(match('#',word).or.word.eq.'') then
c.. Comment
else
c.. Unknown command
write(0,"('Unknown command')")
end if
go to 111
112 write(0,"('Error with parameter(s)')")
go to 111
113 write(0,"('No model fitted')")
go to 111
108 return
end
SHAR_EOF
fi # end of overwriting check
if test -f 'loopx.f'
then
echo shar: will not over-write existing file "'loopx.f'"
else
cat << \SHAR_EOF > 'loopx.f'
subroutine loopx(n,t,y,nwk,ncx,wk)
c.. Carves the integer workspace nwk and real workspace wk into
c the arrays needed by loop, once grade has determined nc.
c Layout: nwk = index(n), rindex(n), kt(nc);
c wk = z(n), then 24 nc-vectors (tc..ab32), then
c ppc(nc,4) and v(nc,7) - 35 nc-elements in all.
c ncx is the largest nc the supplied wk can accommodate.
implicit double precision(a-h,o-z)
dimension t(n),y(n),nwk(*),wk(*)
n1 = n+1
c.. Identify duplicate times and obtain relevant index arrays
call grade(n,t,nc,wk(n1),nwk,nwk(n1),nwk(n+n1))
if(nc.gt.ncx) then
write(0,'("not enough memory: nc =",i6," > ncx =",i6)') nc,ncx
return
end if
call loop(n,t,y,
c z,index,rindex,kt,
& wk,nwk,nwk(n1),nwk(n+n1),
c nc,tc,yc,lamc,muc,
& nc,wk(n1),wk(nc+n1),wk(2*nc+n1),wk(3*nc+n1),
c sigc,tc2,lamc2,muc2,
& wk(4*nc+n1),wk(5*nc+n1),wk(6*nc+n1),wk(7*nc+n1),
c sigc2,u1c,u2c,u3c,
& wk(8*nc+n1),wk(9*nc+n1),wk(10*nc+n1),wk(11*nc+n1),
c w1c,w2c,w3c,d1c,
& wk(12*nc+n1),wk(13*nc+n1),wk(14*nc+n1),wk(15*nc+n1),
c d2c,d3c,ab21,ab12,
& wk(16*nc+n1),wk(17*nc+n1),wk(18*nc+n1),wk(19*nc+n1),
c ab13,ab31,ab23,ab32,
& wk(20*nc+n1),wk(21*nc+n1),wk(22*nc+n1),wk(23*nc+n1),
c ppc,v
& wk(24*nc+n1),wk(28*nc+n1))
return
end
SHAR_EOF
fi # end of overwriting check
if test -f 'match.f'
then
echo shar: will not over-write existing file "'match.f'"
else
cat << \SHAR_EOF > 'match.f'
subroutine parse(line,word,cmd)
c.. Splits line into its first word and the remaining command text.
c word receives the first blank-delimited token (leading blanks
c ignored) and cmd receives whatever follows it; both come back
c blank-padded. An empty line leaves word and cmd blank.
integer lnblnk,n,ib,isp
character*(*) line,word,cmd
word = line
cmd = ''
c.. nothing to do for an empty line
n = lnblnk(line)
if(n.eq.0) return
c.. locate the first non-blank character
ib = 1
10 if(line(ib:ib).eq.' ') then
ib = ib+1
go to 10
end if
word = line(ib:n)
c.. split at the first blank, if any
isp = index(word,' ')
if(isp.eq.0) isp = lnblnk(word)+1
if(n-ib+1.gt.isp) cmd = word(isp+1:n-ib+1)
word = word(1:isp-1)
return
end
c-------
logical function match(word,string)
integer i,j,k,lw
character*(*) word,string
c.. Compares characters in word with initial characters of string
c and returns TRUE if they match, FALSE otherwise
c NB case-insensitive comparison
match=.false.
if(word.eq.'') return
lw=len_trim(word)
c.. a word longer than string cannot match it (and would otherwise
c index past the end of string - bug fix)
if(lw.gt.len(string)) return
do 3 i=1,lw
c.. bug fix: characters converted explicitly with ichar (the old
c j=word(i:i) character-to-integer assignment is non-standard);
c setting bit 32 folds ASCII upper case onto lower case
j=ichar(word(i:i))
k=ichar(string(i:i))
if(ior(j,32).ne.ior(k,32)) return
3 continue
match = .true.
return
end
SHAR_EOF
fi # end of overwriting check
if test -f 'plot.f'
then
echo shar: will not over-write existing file "'plot.f'"
else
cat << \SHAR_EOF > 'plot.f'
subroutine plot(n,t,y,z,nc,tc,lamc,muc,sigc,v)
c.. Placeholder for plotting the data and/or fitted curves: it only
c reports that plotting is unavailable. Arguments (all unused):
c n number of data points
c t(n) ages
c y(n) data points
c z(n) y values converted to z-scores via LMS
c nc number of distinct ages
c tc(nc) distinct ages
c lamc(nc) values for L curve
c muc(nc) values for M curve
c sigc(nc) values for S curve
c v workspace
write(*,'("Not implemented")')
return
end
SHAR_EOF
fi # end of overwriting check
if test -f 'range.f'
then
echo shar: will not over-write existing file "'range.f'"
else
cat << \SHAR_EOF > 'range.f'
subroutine range(n,t,y,nc,tc,index,rindex,kt,alpha,fxalph,cmd)
c.. prints out range of t and y, and optionally truncates t range
c calculates starting values for alpha unless fxalph is true
c cmd may hold a new t minimum and maximum (clipped to the
c current range); when the range shrinks the data are compacted
c in place, grade rebuilds the index arrays, and n and nc are
c updated accordingly.
implicit double precision(a-h,o-z)
character cmd*(*),commas*80
logical fxalph
integer index(n),rindex(n),kt(nc)
dimension t(n),y(n),tc(nc),alpha(3)
ta = tc(1)
tz = tc(nc)
tan = ta
tzn = tz
c.. an empty/unreadable cmd leaves tan,tzn at the full range
read(commas(cmd,2),*,err=40) tan,tzn
if(tan.lt.ta) tan = ta
if(tzn.gt.tz) tzn = tz
if(tan.eq.ta.and.tzn.eq.tz) go to 20
c.. compact (t,y) in place to the requested range
j = 0
do 10 i = 1,n
if (t(i).lt.tan.or.t(i).gt.tzn) go to 10
j = j + 1
if(j.eq.i) go to 10
t(j) = t(i)
y(j) = y(i)
10 continue
n = j
call grade(n,t,nc,tc,index,rindex,kt)
c.. find and report the y range
20 ya = y(1)
yz = ya
do 30 i = 2,n
ya = min(ya,y(i))
30 yz = max(yz,y(i))
write(*,'("n: ",i6," nc: ",i6)') n,nc
write(*,'("Range of t: ",2f12.3)') tc(1),tc(nc)
write(*,'("Range of y: ",2f12.3)') ya,yz
if(fxalph) return
c.. starting values for the smoothing parameters, scaled by the
c t and y ranges and by n
alpha(2) = (tz-ta)**3/(yz-ya)**2/4d2*n
alpha(3) = ((ya+yz)/2)**2*alpha(2)
alpha(1) = alpha(3)*1.2d-4
return
40 write(0,'("Error with parameter(s)")')
return
end
SHAR_EOF
fi # end of overwriting check
if test -f 'rdfree.f'
then
echo shar: will not over-write existing file "'rdfree.f'"
else
cat << \SHAR_EOF > 'rdfree.f'
subroutine rdfree(lu,string,n,field,eof)
c
c reads delimited text from lu into field(1...n)
c valid delimiters are comma, tab and/or space(s)
c lu = 0 implies standard input
c lu < 0 implies string already contains data
c On end of file eof is returned true and the fields are left
c untouched; otherwise any unfilled trailing fields are blank.
c
character string*(*), field(n)*(*), ch
c
logical eof,word,endwd,space
c
c read string and add comma at end
c
if(lu.eq.0) then
read(*,'(a)',end=5) string
else if(lu.gt.0) then
read(lu,'(a)',end=5) string
end if
eof = .false.
k = lnblnk(string)
c.. a completely full string may have been truncated: warn and
c make room for the sentinel comma
if(k.eq.len(string)) then
write(0,'("rdfree: line length >",i4)') k
k = k - 1
end if
string = string(1:k)//','
k = k + 1
c
c initialise counts
c
c field count
ifield = 0
c column count just before word
icc = 0
c character count in string
i = 0
c
c clear flags
c
c delimiter is space (so ignore next comma/tab)
space = .false.
c character(s) of word read
1 word = .false.
c delimiter detected
endwd = .false.
c
c read each character
c
2 i = i + 1
if(i.gt.k) go to 3
ch = string(i:i)
c
c comma or tab
c
if(ch.eq.','.or.ichar(ch).eq.9) then
if(space) then
space = .false.
else
endwd = .true.
end if
c
c space
c
else if(ch.eq.' ') then
if(word) then
endwd = .true.
space = .true.
end if
c
c any other character
c
else
if(.not.word) then
word = .true.
space = .false.
icc = i - 1
end if
end if
c
c save word
c
c write(0,"('rdfree 3: ',3i3,x,a,x,3l1)")
c & i,icc,ifield,ch,word,endwd,space
c
if(.not.endwd) go to 2
ifield = ifield + 1
if(word) then
field(ifield) = string(icc+1:i-1)
c write(0,"('rdfree 4: ',3i3,x,a,x,3l1)")
c & i,icc,ifield,field(ifield),word,endwd,space
else
c.. two adjacent delimiters: an empty field
field(ifield) = ''
end if
if(ifield.eq.n) return
icc = i
go to 1
c
c clear remaining fields
c
3 if(ifield.lt.n) then
do 4 i=ifield+1,n
field(i) = ''
4 continue
end if
return
c
5 eof = .true.
return
end
SHAR_EOF
fi # end of overwriting check
if test -f 'rfcspl.f'
then
echo shar: will not over-write existing file "'rfcspl.f'"
else
cat << \SHAR_EOF > 'rfcspl.f'
function rfcspl(t,n,ppc,ld)
c.. Roughness functional of the fitted cubic spline: on each
c knot interval the integral of the squared (linearly
c varying) second derivative is h*(c1**2+c1*c2+c2**2)/3,
c with c1, c2 the ppc(.,3) coefficients at the interval
c ends (apparently the second-derivative values - see
c csmspl.f) and h the interval width.
c (needs a previous call to csmspl with ionly = 0)
implicit double precision(a-h,o-z)
dimension t(n),ppc(ld,4)
s = 0.0
do i = 1,n-1
h = t(i+1)-t(i)
s = s+h*(ppc(i,3)**2+ppc(i,3)*ppc(i+1,3)+ppc(i+1,3)**2)
end do
rfcspl = s/3.0
return
end
SHAR_EOF
fi # end of overwriting check
if test -f 'rsave.f'
then
echo shar: will not over-write existing file "'rsave.f'"
else
cat << \SHAR_EOF > 'rsave.f'
subroutine rsave(n,y,z,nc,tc,tc2,lambda,mu,sigma,
& alpha,edf,rep,rindex,listrs)
c.. saves the fitted results to the first free file of
c lms.01 ... lms.99
c..set listrs to: 1 to save n, nc, lik, alpha, edf
c 2 to save t, l, m, s and t* for distinct t (nc values)
c 4 to save t, y, z and t* (n values)
c add required options (default 3)
c NB t* is transformed t (omitted unless fitted)
double precision y(n),z(n),tc(nc),tc2(nc),lambda(nc),
& mu(nc),sigma(nc),alpha(3),edf(3),rep(6),alphat(3)
integer rindex(nc)
character*6 filename
c nfile counts files already written and must persist
c between calls (DATA implies SAVE, but say so explicitly)
save nfile
data nfile/0/
c nothing to do unless at least one valid option bit is set
c (iand is the standard intrinsic; the original and() was a
c vendor extension)
if(listrs.le.0.or.iand(listrs,7).eq.0) return
6 nfile = nfile + 1
write(filename,"('lms.',i2.2)") nfile
c status='new' fails if the file exists; the err branch then
c tries the next file number
open(7,file=filename,status='new',iostat=ierr,err=7)
c..1: summary record
if (iand(listrs,1).ne.0) then
do 8 i = 1,3
8 alphat(i) = alpha(i)
c where a curve is effectively constant (edf < 2) report the
c fitted constant instead of the smoothing parameter
if(edf(1).lt.2.0) alphat(1) = lambda(1)
if(edf(2).lt.2.0) alphat(2) = mu(1)
if(edf(3).lt.2.0) alphat(3) = sigma(1)
write(7,100) n,nc,rep,alphat,edf
end if
c..2: one record per distinct t
c rep(6) = 0 evidently flags "no t transform fitted", so t*
c is omitted - TODO confirm against the caller
if (iand(listrs,2).ne.0) then
if(rep(6).eq.0.0) then
do 2 i = 1,nc
write(7,101) tc(i),lambda(i),mu(i),sigma(i)
2 continue
else
do 3 i = 1,nc
write(7,101) tc(i),lambda(i),mu(i),sigma(i),tc2(i)
3 continue
end if
end if
c..4: one record per observation (rindex maps observation to
c its distinct-t position)
if (iand(listrs,4).ne.0) then
if(rep(6).eq.0.0) then
do 1 i = 1,n
write(7,102) tc(rindex(i)),y(i),z(i)
1 continue
else
do 5 i = 1,n
write(7,102) tc(rindex(i)),y(i),z(i),tc2(rindex(i))
5 continue
end if
end if
100 format(2i6,5f10.2,f6.2/3e12.4/3f8.3)
101 format(5e12.5)
102 format(2f12.5,f6.2,f12.5)
close(7)
write(*,'("Saved to ",a)') filename
go to 4
c open failed: give up after lms.99, report any unexpected
c error, otherwise retry with the next number (1017 was
c evidently the "file exists" iostat code on the original
c system - nonportable)
7 if(nfile.eq.99) then
write(0,"('File lms.99 exists')")
else if(ierr.ne.1017) then
write(0,"('File error',i5)") ierr
else
go to 6
end if
4 return
end
SHAR_EOF
fi # end of overwriting check
if test -f 'strch.f'
then
echo shar: will not over-write existing file "'strch.f'"
else
cat << \SHAR_EOF > 'strch.f'
subroutine strch(n,nc,tc,tc2,yc,rindex,velt)
c.. copies the tc array to tc2, then transforms tc in place
c velt > 0: stretches the tc intervals in proportion to the
c corresponding |yc| increments (yc is typically the
c smoothed M curve), blended with the original spacing
c in proportion velt : (1-velt); the overall range of tc
c is preserved
c velt < 0: transforms tc -> log(tc+offset), rescaled back to
c the original range, where:
c min(tc) +ve, offset = 0
c min(tc) = 0, offset = -velt/100 * max(tc)
c min(tc) -ve, offset = (velt-1)*min(tc)
c so velt = -1 is a sensible default
c velt = 0: no-op; ustrch reverses the transform
c rindex is carried for interface compatibility only and is
c not referenced here
implicit double precision(a-h,o-z)
integer rindex
dimension tc(nc),yc(nc),rindex(n),tc2(nc)
if (velt.eq.0.0) return
c keep the original abscissae in tc2
do i = 1,nc
tc2(i) = tc(i)
end do
scale = tc2(nc)-tc2(1)
if(velt.gt.0.0) then
c total absolute travel of the curve
ytot = 0.0
do i = 2,nc
ytot = ytot + abs(yc(i)-yc(i-1))
end do
scale = scale/ytot
c blend original spacing with scaled |yc| increments
do i = 2,nc
tc(i) = tc(i-1)+(1.0-velt)*(tc2(i)-tc2(i-1))
& +velt*scale*(abs(yc(i)-yc(i-1)))
end do
else
c choose the offset so the log argument stays positive
offset = 0.0
if(tc(1).lt.0.0) offset = (velt-1.0)*tc(1)
if(tc(1).eq.0.0) offset = -velt*tc(nc)/1e2
do i = 1,nc
tc(i) = dlog(tc(i)+offset)
end do
c map the log scale back onto the original range
scale = scale/(tc(nc)-tc(1))
offset = tc2(1)-tc(1)*scale
do i = 1,nc
tc(i) = offset+tc(i)*scale
end do
end if
return
end
SHAR_EOF
fi # end of overwriting check
if test -f 'trace.f'
then
echo shar: will not over-write existing file "'trace.f'"
else
cat << \SHAR_EOF > 'trace.f'
function trace(nc,ppc,v,ld)
c.. Computes trace(S) = model complexity d.f.
c immediately after a call to csmspl with ionly = 0 or -1
c.. Changed compatibly with csmspl, 4 January 1994.
c Uses the array v: read-only
c Array ppc is destroyed
c Assumes that on entry:
c v(2:nc-1,1:3) contains decomposition of (alphaQ'W^-1Q+R)
c v(1:nc-1,4) contains spacings between design points.
c (using Green and Silverman notation)
c as indeed is arranged by csmspl
implicit double precision(a-h,o-z)
dimension ppc(ld,4),v(ld,7)
c copy the banded decomposition (interior rows 2..nc-1,
c bands 1..3) into ppc, used here as workspace
do 76 i = 2,nc-1
do 76 j = 1,3
76 ppc(i,j) = v(i,j)
c hdeh (hdeh.f in this package) transforms the band matrix in
c place, operating on the (nc-2)-row subarray starting at
c ppc(2,1) with column 4 as workspace - presumably producing
c the central bands of the inverse; confirm against hdeh.f
call hdeh(ppc(2,1),ld,nc-2,3,ppc(1,4))
c.. compute trace by new algorithm:
c diagonal terms weighted by the two adjacent knot spacings,
c off-diagonal terms by the spacing between them
trace = 0.0
do 81 i = 2,nc-1
81 trace = trace+ppc(i,1)*(v(i-1,4)+v(i,4))
do 82 i = 2,nc-2
82 trace = trace+ppc(i,2)*v(i,4)
c the added 2.0 is evidently the two d.f. of the linear part
c that the smoother reproduces exactly - confirm vs csmspl.f
trace = 2.0+trace/3.0
return
end
SHAR_EOF
fi # end of overwriting check
if test -f 'upd.f'
then
echo shar: will not over-write existing file "'upd.f'"
else
cat << \SHAR_EOF > 'upd.f'
subroutine upd(istate,factor,target,ap,ep,alpha,edf,const,mon)
implicit double precision(a-h,o-z)
c.. one step of a simple search for alpha such that edf = target,
c using the local model alpha = constant*edf**factor
c.. first call: set istate = 0 and supply target, alpha, edf;
c every call returns the next alpha to try
c ap, ep hold the previous alpha and edf between calls
c istate is set to 1 after the first step, 2 thereafter
c target = 1 means alpha is held fixed (just report const)
c 1 < target < 2 does nothing
c mon enables one line of monitoring output per call
logical mon
if(target.eq.1.0) then
c alpha fixed by the caller - nothing to update
if(mon) write(*,'("fixed:",f6.2)') const
else if(target.ge.2.0) then
if(istate.eq.0) then
c initial exponent guess
factor = -5.0
next = 1
else
c secant estimate of the exponent from the last two
c (alpha,edf) pairs, damped towards the previous value
c as edf approaches convergence
factor = (factor+dlog(ap/alpha)/dlog(ep/edf)
& *dabs(ep-edf))/(1.0+dabs(ep-edf))
next = 2
end if
if(mon) write(*,'(3f6.2,e11.4)') factor,target,edf,alpha
c remember this point, then step alpha towards the target
ep = edf
ap = alpha
alpha = alpha*(target/edf)**factor
istate = next
end if
return
end
SHAR_EOF
fi # end of overwriting check
if test -f 'ustrch.f'
then
echo shar: will not over-write existing file "'ustrch.f'"
else
cat << \SHAR_EOF > 'ustrch.f'
subroutine ustrch(n,nc,tc,tc2,rindex,velt)
c.. undoes strch by swapping tc with tc2, so that tc again
c holds the original abscissae and tc2 the transformed ones
c a no-op when velt = 0 (strch did nothing in that case)
c n and rindex are carried for interface compatibility only
implicit double precision(a-h,o-z)
integer rindex
dimension tc(nc),tc2(nc),rindex(n)
if (velt.eq.0.0) return
do i = 1,nc
swap = tc(i)
tc(i) = tc2(i)
tc2(i) = swap
end do
return
end
SHAR_EOF
fi # end of overwriting check
# End of shell archive
exit 0