#! /bin/sh
# This is a shell archive, meaning:
# 1. Remove everything above the #! /bin/sh line.
# 2. Save the resulting text in a file.
# 3. Execute the file with /bin/sh (not csh) to create the files:
# DISTRIB2.2
# This archive created: Wed Jul 7 14:25:17 1993
# By: G.P.Nason ()
# Put /bin first so the utilities used below (test, mkdir, cat, echo)
# resolve predictably, and make sure PATH is exported.
PATH=/bin:$PATH
export PATH
# Create the distribution directory unless it already exists.
if [ ! -d 'DISTRIB2.2' ]
then
echo "shar: creating directory 'DISTRIB2.2'"
mkdir 'DISTRIB2.2'
fi
echo "shar: entering directory 'DISTRIB2.2'"
cd 'DISTRIB2.2'
# --- Extract 'Copyright' ----------------------------------------------------
# The here-document delimiter is escaped (\SHAR_EOF), so the payload below is
# written out literally: no parameter or command substitution occurs.
# An existing file of the same name is never overwritten.
echo shar: extracting "'Copyright'" '(1692 characters)'
if test -f 'Copyright'
then
echo shar: will not over-write existing file "'Copyright'"
else
cat << \SHAR_EOF > 'Copyright'
/*
* Copyright (C) 1993 Guy Nason
*
* Permission to use, copy, and distribute this software and its
* documentation for any non-commercial purpose without fee
* is hereby granted, provided that the above copyright notice appear
* in all copies and that both that copyright notice and this permission
* notice appear in supporting documentation.
*
* Permission to modify the software is granted, but not the right to
* distribute the modified code.
*
* This software is provided "as is" without express or implied warranty.
*
*
* AUTHOR
*
* Software:
* Guy Nason, gpn@maths.bath.ac.uk
*
*/
Release History
---------------
1.0 Basic 1D wavelet decomposition and reconstruction using some of
Daubechies orthonormal compactly supported wavelets; dumb boundary
handling; wavelet coefficient plotting; dumb thresholding.
2.0 Added 2D wavelet decomposition and reconstruction; added more
wavelets; better boundary handling; more plotting routines
2.1 Added putC, putD functions, more sensible thresholding including
Donoho and Johnstone's universal thresholds; rationalisation of
existing functions;
2.2 Added the following functionality:
* extra method of boundary handling, periodic functions
* new family of wavelets added, Daubechies' least-asymmetric
* new function "draw" that produces nice pictures of
one- and two dimensional wavelets
* new class iwmdc added and facilities to automatically
compress thresholded image wavelet decomp. objects
* improvements to Makefile and automatic dynamic loading.
Thanks to Peter Clifford, Howard Grubb, and Martin Maechler for
providing suggestions and improvements for wavethresh 2.2
SHAR_EOF
fi # end of overwriting check
# --- Extract 'ImageDecomposeStep.c' -----------------------------------------
# NOTE(review): this section of the archive appears to be damaged in transit.
# Text between '<' and '>' characters seems to have been stripped (observe the
# empty "#include" lines below, and the line reading
#   for(j=0; j 'Makefile'
# where a C for-loop runs straight into the redirection that should have
# started the 'Makefile' section). The stripping also removed this file's
# SHAR_EOF terminator and the Makefile section's header commands, so as the
# script now stands everything down to the next SHAR_EOF line — C source AND
# Makefile text — is written into ImageDecomposeStep.c. Do not repair by
# hand; re-extract from a pristine copy of the archive.
echo shar: extracting "'ImageDecomposeStep.c'" '(6161 characters)'
if test -f 'ImageDecomposeStep.c'
then
echo shar: will not over-write existing file "'ImageDecomposeStep.c'"
else
cat << \SHAR_EOF > 'ImageDecomposeStep.c'
/*
* ImageDecomposeStep - Take an image and do a one level decomp
*
* Error Codes
*
* 0 - Ok.
*
* 1 - Memory error for (afterC) temporary image
*
* 2 - Memory error for (afterD) temporary image
*
* 3 - Memory error for (ccopy) temporary row store
*
* 4 - Memory error for (ccopy_out) temporary row store
*
* 5 - Memory error for (dcopy_out) temporary row store
*
* 6-9 - Memory errors for (afterCC,afterCD,afterDC,afterDD)
* store for the answers
*/
#include
#include
/* Access an element in an image */
#define ACCESS(image, size, i, j) *(image + (i)*(size) + (j))
ImageDecomposeStep(C, Csize, firstCin, H, LengthH,
LengthCout, firstCout, lastCout,
LengthDout, firstDout, lastDout,
cc_out, cd_out, dc_out, dd_out, bc,
error)
double *C; /* Input data image */
long Csize; /* Size of image (side length) */
long firstCin; /* Index number of first element in input "C" image */
double *H; /* Filter coefficients */
long LengthH; /* Length of filter */
/* Details about output image */
long LengthCout;/* Length of C part of output image */
long firstCout; /* Index number of first element in output "C" image */
long lastCout; /* Index number of last element */
long LengthDout;/* Length of D part of output image */
long firstDout; /* Index number of first element in output "D" image */
long lastDout; /* Index number of last element */
double **cc_out;/* Smoothed output image */
double **cd_out;/* Horizontal detail */
double **dc_out;/* Vertical detail */
double **dd_out;/* Diagonal detail */
long bc; /* Method of boundary correction */
long *error; /* Error code */
{
register int j,row,col;
double *ccopy; /* Used to copy input data to convolution routines */
double *ccopy_out;/* Used to copy output data to afterC after conv. */
double *dcopy_out;/* Used to copy output data to afterD after conv. */
double *afterC; /* Temporary store for image data after C convolution */
double *afterD; /* Temporary store for image data after D convolution */
double *afterCC,*afterCD,*afterDC,*afterDD; /* Results */
*error = 0l;
/* Get memory for afterC */
if ((afterC = (double *)malloc((unsigned)(Csize*LengthCout*sizeof(double))))
==NULL){
*error = 1l;
return;
}
/* Get memory for afterD */
if ((afterD = (double *)malloc((unsigned)(Csize*LengthDout*sizeof(double))))
==NULL){
*error = 2l;
return;
}
/* Get memory for row of image to pass to convolution routines */
if ((ccopy = (double *)malloc((unsigned)(Csize*sizeof(double)))) == NULL) {
*error = 3l;
return;
}
/* Get memory for output row after C convolution */
if ((ccopy_out = (double *)malloc((unsigned)(LengthCout*sizeof(double))))
==NULL) {
*error = 4l;
return;
}
/* Get memory for output row after D convolution */
if ((dcopy_out = (double *)malloc((unsigned)(LengthDout*sizeof(double))))
==NULL) {
*error = 5l;
return;
}
/* Do convolutions on rows of C */
for(row=0; row < (int)Csize; ++row) {
/* Copy row of C into ccopy */
for(j=0; j 'Makefile'
# Original version: Copyright Guy Nason 1993
# Thanks to Martin Maechler for helpful comments for this version.
#
# Makefile to generate C object code to be dynamically linked into Splus
#
# Object file generated is wd2d.o
#
# Command to load code into Splus: dyn.load("wd2d.o")
# or if this doesn't work try: dyn.load2("wd2d.o")
#
# DEC machines and S: uncomment the appropriate lines below.
#
# Your S goes here!
S= Splus
# Don't use gcc if you're using SPlus, only use it if you used it to compile
# your version of S.
# CC = gcc
#
# C-source. This is for all the convolution, 1D and 2D decomposition and
# reconstruction methods
#
SOURCE=StoIDS.c ImageDecomposeStep.c convolve.c conbar.c waverecons.c \
wavedecomp.c StoIRS.c
OBJECTS=$(SOURCE:.c=.o)
.KEEP_STATE:
PROGRAM=wd2d.o
CFLAGS=-O
# DEC machines and S, uncomment next line
#CFLAGS=-O -G 0
# Some IBM RS/6000 machines use the following line (turns off optimisation)
#CFLAGS=
LDFLAGS=
# DEC machines and S, uncomment next line
#LDFLAGS=-G 0
# Some IBM RS/6000 machines use the following line (turns off optimisation)
#CFLAGS=
all: $(PROGRAM) S-setup
$(PROGRAM): $(OBJECTS)
ld $(LDFLAGS) -r -d $(OBJECTS) -o $@
-@echo "You may now type 'make cleanobj' to remove unneeded obj. code"
-@echo " ~~~~~~~~~~~~~"
S-setup: Source.S
echo "source('Source.S')" | $(S) > $@.log
touch $@
Source.S: Source-0.S
sed "/dyn.load/s|YOURDIR|`pwd`|" Source-0.S > $@
cleanobj:
rm -f $(OBJECTS)
##-- The following removes everything which can be remade by 'make' --> 130 KBy
clean: cleanobj
rm -f Source.S S-setup* $(PROGRAM) .make.state \
.Data/* .Data/.Audit .Data/.First .Data/.Last*
SHAR_EOF
fi # end of overwriting check
# --- Extract 'README' -------------------------------------------------------
# Quoted here-document: the payload is written verbatim (no expansion), and
# an existing README is never overwritten.
echo shar: extracting "'README'" '(1843 characters)'
if test -f 'README'
then
echo shar: will not over-write existing file "'README'"
else
cat << \SHAR_EOF > 'README'
Welcome to wavelets for S and Splus
-----------------------------------
Setting up the software
-----------------------
1. Create a subdirectory to contain all the software
% mkdir WAVELET
% cd WAVELET
2. Unpack the Shell archive
% sh wavethresh2.2
3. This creates a subdirectory called "DISTRIB2.2", enter this directory:
% cd DISTRIB2.2
4. Set up the software by typing:
% make all
%
5. Clean up by typing:
% make cleanobj
Using the software
------------------
Assumptions: the wavelet software is
in directory "/home/fred/WAVELET/DISTRIB2.2/.Data"
1. Start-up S anywhere (/home/fred/WAVELET/DISTRIB2.2 NOT recommended)
% S
>
2. Attach the wavelet software directory
> attach("/home/fred/WAVELET/DISTRIB2.2/.Data")
3. You are now ready to use the software
HINTS:
* Put the command in step 2 into a ".First" file. Then they
will be executed every time you start up S
* Raw S users (e.g. not Splus) may need to adapt the makefile
(change S=Splus to S=S)
* Users of some machines may need to adapt the makefile
(I've had reports for IBM RS machines, DEC machines and Silicon Graphics
machines which may help.)
DOCUMENTATION:
* full S help documentation is supplied with the distribution
* fuller documentation available by anonymous ftp from
gdr.bath.ac.uk in /pub/masgpn as "dws.ps.Z". This is
a compressed PostScript file (This is for Release 2.1).
* The LATEST documentation for Release 2.2 may be obtained by emailing
Guy Nason at gpn@maths.bath.ac.uk
Have fun!
Guy Nason and Bernard Silverman
p.s. all suggestions to Guy Nason, email gpn@maths.bath.ac.uk
n.b. This work was funded by a grant awarded to Prof B W Silverman
from the UK Science and Engineering Research Council (Complex Stochastic
Systems Initiative).
SHAR_EOF
fi # end of overwriting check
# --- Extract 'Sconvolve.c' --------------------------------------------------
# NOTE(review): this payload also shows signs of the '<'..'>' stripping seen
# earlier in the archive: the "#include" line below has lost its header name,
# and the line
#   for(m=0; m= 0) && (n < lengthC))
# merges convolveC's inner loop with the start of what was evidently a
# separate reflect() function (its closing brace for convolveC and the
# reflect() signature are missing). The extracted C file will not compile as
# written; re-extract from a pristine copy of the archive.
echo shar: extracting "'Sconvolve.c'" '(2995 characters)'
if test -f 'Sconvolve.c'
then
echo shar: will not over-write existing file "'Sconvolve.c'"
else
cat << \SHAR_EOF > 'Sconvolve.c'
/*
* CONVOLVE - Do filter H filter convolution with boundary
*/
#include
/*
* ACCESSC handles negative accesses, as well as those that exceed the number
* of elements
*/
/*
#define REFLECT(n, lengthC) ( ((n)<0) ? (-1-(n)) : ((n)<(lengthC) ? (n) : (2*(lengthC)-(n)-1)))
*/
#define ACCESSC(c, firstC, lengthC, ix) *(c+reflect(((ix)-(firstC)),(lengthC)))
int convolveC(c_in, lengthcin, firstcin, H, lengthh, c_out, lengthcout,
firstcout, lastcout)
double *c_in; /* Input data */
long *lengthcin;/* Length of this array */
long *firstcin;
double *H; /* Filter */
long *lengthh; /* Length of filter */
double *c_out; /* Output data */
long *lengthcout;/* Length of above array */
long *firstcout;/* First index of C array */
long *lastcout; /* Last index of C array */
{
double sum;
register int k;
register int count_out;
register int m;
int LengthH;
int LengthCin;
int firstCin;
int LengthCout;
int firstCout;
int lastCout;
LengthH = (int)*lengthh;
LengthCin = (int)*lengthcin;
firstCin = (int)*firstcin;
LengthCout = (int)*lengthcout;
firstCout = (int)*firstcout;
lastCout = (int)*lastcout;
count_out = 0;
for(k=firstCout; k<=lastCout; ++k) {
sum = 0.0;
for(m=0; m= 0) && (n < lengthC))
return(n);
else if (n<0) {
n = -1-n;
if (n >= lengthC) {
fprintf(stderr, "reflect: access error (%d,%d)\n", n,lengthC);
exit(2);
}
else
return(n);
}
else {
n = 2*lengthC - n - 1;
if (n<0) {
fprintf(stderr, "reflect: access error (%d,%d)\n", n,lengthC);
exit(3);
}
else
return(n);
}
/* Safety */
fprintf(stderr, "reflect: SHOULD NOT HAVE REACHED THIS POINT\n");
exit(4);
}
SHAR_EOF
fi # end of overwriting check
# --- Extract 'StoIDS.c' -----------------------------------------------------
# Quoted here-document: payload written verbatim, existing file preserved.
# (Unlike the two C sections above, this payload shows no obvious stripping
# damage, though it should still be checked against a pristine archive.)
echo shar: extracting "'StoIDS.c'" '(1271 characters)'
if test -f 'StoIDS.c'
then
echo shar: will not over-write existing file "'StoIDS.c'"
else
cat << \SHAR_EOF > 'StoIDS.c'
/* Access an element in an image */
#define ACCESS(image, size, i, j) *(image + (i)*(size) + (j))
StoIDS(C, Csize, firstCin, H, LengthH,
LengthCout, firstCout, lastCout,
LengthDout, firstDout, lastDout,
ImCC, ImCD, ImDC, ImDD, bc,
error)
double *C;
long *Csize;
long *firstCin;
double *H;
long *LengthH;
long *LengthCout;
long *firstCout;
long *lastCout;
long *LengthDout;
long *firstDout;
long *lastDout;
double *ImCC,*ImCD,*ImDC,*ImDD;
long *bc;
long *error;
{
register int i,j;
double *cc_out, *cd_out, *dc_out, *dd_out;
ImageDecomposeStep(C, *Csize, *firstCin, H, *LengthH,
*LengthCout, *firstCout, *lastCout,
*LengthDout, *firstDout, *lastDout,
&cc_out, &cd_out, &dc_out, &dd_out, *bc,
error);
/* Copy images */
for(i=0; i<(int)*LengthDout; ++i) {
for(j=0; j<(int)*LengthDout; ++j)
ACCESS(ImDD, (int)*LengthDout, i, j) = ACCESS(dd_out,
*LengthDout, i, j);
for(j=0; j<(int)*LengthCout; ++j)
ACCESS(ImDC, (int)*LengthDout, j,i) = ACCESS(dc_out,
*LengthDout, j,i);
}
for(i=0; i<(int)*LengthCout; ++i) {
for(j=0; j<(int)*LengthDout; ++j)
ACCESS(ImCD, (int)*LengthCout, j,i) = ACCESS(cd_out,
*LengthCout, j,i);
for(j=0; j<(int)*LengthCout; ++j)
ACCESS(ImCC, (int)*LengthCout,j,i) = ACCESS(cc_out,
*LengthCout, j,i);
}
}
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'Source-0.S'" '(54201 characters)'
if test -f 'Source-0.S'
then
echo shar: will not over-write existing file "'Source-0.S'"
else
cat << \SHAR_EOF > 'Source-0.S'
".First"<-
function()
{
wvrelease()
cat("WARNING: You are in the wavelet software distribution\n")
cat("Take care not to delete any functions\n")
}
"accessC"<-
function(wd.structure, level = wd.structure$nlevels, boundary = F)
{
ctmp <- class(wd.structure)
if(is.null(ctmp))
stop("wd.structure has no class")
else if(ctmp != "wd")
stop("wd.structure is not of class wd")
if(level < 0)
stop("Must have a positive level")
else if(level > wd.structure$nlevels)
stop("Cannot exceed maximum number of levels")
level <- level + 1
first.last.c <- wd.structure$fl.dbase$first.last.c
first.level <- first.last.c[level, 1]
last.level <- first.last.c[level, 2]
offset.level <- first.last.c[level, 3]
if(boundary == T) {
n <- last.level - first.level + 1
coefs <- wd.structure$C[(offset.level + 1):(offset.level + n)]
}
else {
n <- 2^(level - 1)
coefs <- wd.structure$C[(offset.level + 1 - first.level):(
offset.level + n - first.level)]
}
return(coefs)
}
"accessD"<-
function(wd.structure, level, boundary = F)
{
ctmp <- class(wd.structure)
if(is.null(ctmp))
stop("wd.structure has no class")
else if(ctmp != "wd")
stop("wd.structure is not of class wd")
if(level < 0)
stop("Must have a positive level")
else if(level > (wd.structure$nlevels - 1))
stop("Cannot exceed maximum number of levels")
level <- level + 1
first.last.d <- wd.structure$fl.dbase$first.last.d
first.level <- first.last.d[level, 1]
last.level <- first.last.d[level, 2]
offset.level <- first.last.d[level, 3]
if(boundary == T) {
n <- last.level - first.level + 1
coefs <- wd.structure$D[(offset.level + 1):(offset.level + n)]
}
else {
n <- 2^(level - 1)
coefs <- wd.structure$D[(offset.level + 1 - first.level):(
offset.level + n - first.level)]
}
return(coefs)
}
"bwimage.colors"<-
c("-xrm 'splus*colors: black white 246 black'", "-xrm splus*nHalftones: 0",
"-xrm splus*haltoneImage: No", "-xrm 'splus*nColors: 256'")
"compress"<-
function(x, ...)
UseMethod("compress")
"compress.default"<-
function(v, verbose = F)
{
n <- length(v)
r <- sum(v != 0)
if(n > 2 * r) {
position <- (1:n)[v != 0]
values <- v[position]
answer <- list(position = position, values = values,
original.length = n)
class(answer) <- "compressed"
if(verbose == T)
cat("Compressed ", n, " into ", 2 * r, "(", signif((100 *
2 * r)/n, 3), "%)\n")
return(answer)
}
else {
answer <- list(vector = v)
class(answer) <- "uncompressed"
if(verbose == T)
cat("No compression\n")
return(answer)
}
}
"compress.imwd"<-
function(imwd, verbose = F)
{
if(verbose == T) cat("Argument checking...") #
#
# Check class of imwd
#
if(verbose == T)
cat("Argument checking\n")
ctmp <- class(imwd)
if(is.null(ctmp))
stop("imwd has no class")
else if(ctmp != "imwd")
stop("imwd is not of class imwd")
squished <- list(nlevels = imwd$nlevels, fl.dbase = imwd$fl.dbase,
filter = imwd$filter, w0Lconstant = imwd$w0Lconstant, bc = imwd$
bc) #
#
# Go round loop compressing each set of coefficients
#
for(level in 0:(imwd$nlevels - 1)) {
if(verbose == T)
cat("Level ", level, "\n\t")
nm <- lt.to.name(level, "CD")
if(verbose == T)
cat("CD\t")
squished[[nm]] <- compress.default(imwd[[nm]], ver = verbose)
nm <- lt.to.name(level, "DC")
if(verbose == T)
cat("\tDC\t")
squished[[nm]] <- compress.default(imwd[[nm]], ver = verbose)
nm <- lt.to.name(level, "DD")
if(verbose == T)
cat("\tDD\t")
squished[[nm]] <- compress.default(imwd[[nm]], ver = verbose)
}
class(squished) <- c("imwdc")
if(verbose == T)
cat("Overall compression: Was: ", w <- object.size(imwd),
" Now:", s <- object.size(squished), " (", signif((100 *
s)/w, 3), "%)\n")
squished
}
"dof"<-
function(wd)
{
cwd <- class(wd)
if(is.null(cwd)) {
stop("Object has no class")
}
else if(cwd != "wd")
stop("Object is not of class wd")
else {
#
# Count number of non-zero coefficients
#
nlev <- wd$nlevels #
#
# nnonzero counts the number of nonzero coefficients
# This is already 1, since the C contains first level constant
#
nnonzero <- 1
for(i in 0:(nlev - 1)) {
nnonzero <- nnonzero + sum(accessD(wd, lev = i) != 0)
}
}
nnonzero
}
"draw"<-
function(x, ...)
UseMethod("draw")
"draw.default"<-
function(filter.number = 2, family = "DaubExPhase", resolution = 1024, verbose
= F, plot.it = T, main = "Wavelet Picture", sub = zwd$filter$name,
xlab = "x", ylab = "psi", dimension = 1, twodplot = persp, enhance = T,
efactor = 0.050000000000000003, ...)
{
resolution <- resolution/2 #
#
# First obtain support widths
#
sp <- support(filter.number = filter.number, family = family, m = 0, n
= 0)
lh <- c(sp$phi.lh, sp$phi.rh)
lh <- lh[1]
rh <- sp$phi.rh + 2 * resolution - 1
if(verbose == T)
cat("Support of highest resolution wavelets is [", lh, ", ", rh,
"]\n") #
pic.support <- support(filter.number = filter.number, family = family,
m = 0, n = 0)
pic.support <- c(pic.support$psi.lh, pic.support$psi.rh) #
#
# Now go through all levels and see what is the lowest resolution wavelet
# that we can use to get the whole wavelet in the support range of the
# highest resolution wavelets.
#
lowest.level <- log(resolution)/log(2)
if(verbose == T)
cat("Lowest level is: ", lowest.level, "\n")
selection <- NULL
candidates <- NULL
for(m in lowest.level:0) {
if(verbose == T) cat("Level ", m, " testing\n") #
#
# Go through each wavelet at this level and find out
# it's support. Then check to see if it lies in the
# union of the supports of the highest resolution
# wavelets, and select it if it does.
#
# If fact we examine all the ones that will fit, and choose one that
# is near the middle - to get a nice picture.
#
for(n in 0:(2^(lowest.level - m) - 1)) {
lhs <- support(filter.number = filter.number, family =
family, m = m, n = n)
rhs <- lhs$rh
lhs <- lhs$lh
if(verbose == T)
cat("LHS: ", lhs, " RHS: ", rhs, "\n")
if((lhs >= lh) && (rhs <= rh)) {
candidates <- c(candidates, n)
if(verbose == T)
cat("Level ", m, " Position: ", n,
" selected\n")
}
}
if(!is.null(candidates)) {
if(verbose == T) {
cat("Candidates are \n")
print(candidates)
}
n <- floor(median(candidates))
if(verbose == T)
cat("Choosing ", n, "\n")
selection <- list(m = m, n = n)
lhs <- support(filter.number = filter.number, family =
family, m = m, n = n)
rhs <- lhs$rh
lhs <- lhs$lh
break
}
if(!is.null(selection))
break
}
#
#
# If we haven't selected anything, then set the coefficient to
# be one of the highest resolution coefficients. ALL of these
# are guaranteed to be in the union of all their supports!
# The picture will be crap though!
#
if(is.null(selection)) selection <- list(m = 0, n = 0) #
#
# Build a wd object structure consisting solely of zeroes.
#
zwd <- wd(rep(0, length = resolution * 2), filter.number =
filter.number, family = family, bc = "symmetric") #
#
# Insert a vector containing a 1 where we want to put the coefficient
#
wd.lev <- lowest.level - selection$m
if(verbose == T)
cat("Coefficient insertion at wd level: ", wd.lev, "\n")
if(wd.lev == 0)
pickout <- 1
else {
pickout <- rep(0, 2^wd.lev)
pickout[selection$n + 1] <- 1
}
zwd <- putD(zwd, level = wd.lev, v = pickout) #
#
# Reconstruct
#
zwr <- wr(zwd) #
#
# Scales
#
if(verbose == T) {
cat("ps: ", pic.support[1], pic.support[2], "\n")
cat("lh,rh: ", lh, rh, "\n")
cat("lhs,rhs: ", lhs, rhs, "\n")
}
aymax <- ((pic.support[2] - pic.support[1]) * (rh - lh))/(rhs - lhs)
ax <- pic.support[1] - (aymax * (lhs - lh))/(rh - lh)
ay <- ax + aymax
if(verbose == T) cat("ax,ay ", ax, ay, "\n") #
#
# Scale up y values, because we're actually using a higher "resolution"
# wavelet than psi(x)
#
zwr <- zwr * sqrt(2)^(selection$m + 1) #
#
# Plot it if required
#
x <- seq(from = ax, to = ay, length = resolution * 2)
if(enhance == T) {
sv <- (abs(zwr) > efactor * range(abs(zwr))[2])
sv <- (1:length(sv))[sv]
tr <- range(sv)
sv <- tr[1]:tr[2]
x <- x[sv]
zwr <- zwr[sv]
main <- paste(main, " (Enhanced)")
}
if(plot.it == T) {
if(dimension == 1)
plot(x = x, y = zwr, main = main, sub = sub, xlab =
xlab, ylab = ylab, type = "l", ...)
else if(dimension == 2) {
twodplot(x = x, y = x, z = outer(zwr, zwr), xlab = xlab,
ylab = xlab, zlab = ylab, ...)
title(main = main, sub = sub)
invisible()
}
else stop("Can only do 1 or 2 dimensional plots")
}
else {
if(dimension == 1)
return(list(x = x, y = zwr))
else if(dimension == 2)
return(list(x = x, y = x, z = outer(zwr, zwr)))
else stop("Can only do 1 or 2 dimensional plots")
}
}
"draw.imwd"<-
function(wd, resolution = 128, ...)
{
filter <- wd$filter
draw.default(filter.number = filter$filter.number, family = filter$
family, dim = 2, resolution = resolution, ...)
}
"draw.imwdc"<-
function(wd, resolution = 128, ...)
{
filter <- wd$filter
draw.default(filter.number = filter$filter.number, family = filter$
family, dim = 2, resolution = resolution, ...)
}
"draw.wd"<-
function(wd, ...)
{
filter <- wd$filter
draw.default(filter.number = filter$filter.number, family = filter$
family, type = "l", ...)
}
"example.1"<-
function()
{
x <- seq(0, 1, length = 513)
x <- x[1:512]
y <- rep(0, length(x))
xsv <- (x <= 0.5) # Left hand end
y[xsv] <- -16 * x[xsv]^3 + 12 * x[xsv]^2
xsv <- (x > 0.5) & (x <= 0.75) # Middle section
y[xsv] <- (x[xsv] * (16 * x[xsv]^2 - 40 * x[xsv] + 28))/3 - 1.5
xsv <- x > 0.75 #Right hand end
y[xsv] <- (x[xsv] * (16 * x[xsv]^2 - 32 * x[xsv] + 16))/3
list(x = x, y = y)
}
"filter.select"<-
function(filter.number, family = "DaubExPhase", constant = 1)
{
if(family == "DaubExPhase") {
family <- "DaubExPhase" #
#
# The following wavelet coefficients are taken from
# Daubechies, I (1988) Orthonormal Bases of Wavelets
# Communications on Pure and Applied Mathematics. Page 980
# or Ten Lectures on Wavelets, Daubechies, I, 1992
# CBMS-NSF Regional Conference Series, page 195, Table 6.1
#
# Comment from that table reads:
# "The filter coefficients for the compactly supported wavelets
# with extremal phase and highest number of vanishing moments
# compatible with their support width".
#
if(filter.number == 1) {
#
#
# This is for the Haar basis. (not in Daubechies).
#
H <- rep(0, 2)
H[1] <- 1/sqrt(2)
H[2] <- H[1]
filter.name <- c("Haar wavelet")
}
else if(filter.number == 2) {
H <- rep(0, 4)
H[1] <- 0.48296291314500001
H[2] <- 0.83651630373800001
H[3] <- 0.22414386804200001
H[4] <- -0.12940952255099999
filter.name <- c("Daub cmpct on ext. phase N=2")
}
else if(filter.number == 3) {
H <- rep(0, 6)
H[1] <- 0.33267055294999998
H[2] <- 0.80689150931099995
H[3] <- 0.45987750211799999
H[4] <- -0.13501102001000001
H[5] <- -0.085441273881999999
H[6] <- 0.035226291882000001
filter.name <- c("Daub cmpct on ext. phase N=3")
}
else if(filter.number == 4) {
H <- rep(0, 8)
H[1] <- 0.23037781330900001
H[2] <- 0.71484657055300005
H[3] <- 0.63088076793000003
H[4] <- -0.027983769416999999
H[5] <- -0.18703481171899999
H[6] <- 0.030841381835999999
H[7] <- 0.032883011667000001
H[8] <- -0.010597401785
filter.name <- c("Daub cmpct on ext. phase N=4")
}
else if(filter.number == 5) {
H <- rep(0, 10)
H[1] <- 0.160102397974
H[2] <- 0.60382926979700002
H[3] <- 0.72430852843799998
H[4] <- 0.138428145901
H[5] <- -0.242294887066
H[6] <- -0.032244869585000002
H[7] <- 0.077571493840000005
H[8] <- -0.0062414902130000002
H[9] <- -0.012580751999
H[10] <- 0.0033357252850000001
filter.name <- c("Daub cmpct on ext. phase N=5")
}
else if(filter.number == 6) {
H <- rep(0, 12)
H[1] <- 0.11154074335
H[2] <- 0.49462389039799998
H[3] <- 0.751133908021
H[4] <- 0.31525035170900001
H[5] <- -0.22626469396500001
H[6] <- -0.12976686756700001
H[7] <- 0.097501605586999995
H[8] <- 0.027522865529999999
H[9] <- -0.031582039318000001
H[10] <- 0.00055384220099999998
H[11] <- 0.0047772575110000002
H[12] <- -0.001077301085
filter.name <- c("Daub cmpct on ext. phase N=6")
}
else if(filter.number == 7) {
H <- rep(0, 14)
H[1] <- 0.077852054084999997
H[2] <- 0.396539319482
H[3] <- 0.72913209084599995
H[4] <- 0.469782287405
H[5] <- -0.14390600392899999
H[6] <- -0.22403618499399999
H[7] <- 0.071309219266999999
H[8] <- 0.080612609151000006
H[9] <- -0.038029936935000001
H[10] <- -0.016574541631
H[11] <- 0.012550998556
H[12] <- 0.00042957797300000001
H[13] <- -0.001801640704
H[14] <- 0.00035371380000000002
filter.name <- c("Daub cmpct on ext. phase N=7")
}
else if(filter.number == 8) {
H <- rep(0, 16)
H[1] <- 0.054415842243000001
H[2] <- 0.31287159091400002
H[3] <- 0.67563073629699999
H[4] <- 0.58535468365400001
H[5] <- -0.015829105255999999
H[6] <- -0.28401554296199999
H[7] <- 0.00047248457399999999
H[8] <- 0.12874742661999999
H[9] <- -0.017369301002000001
H[10] <- -0.044088253931
H[11] <- 0.013981027917
H[12] <- 0.0087460940470000005
H[13] <- -0.0048703529929999996
H[14] <- -0.00039174037299999999
H[15] <- 0.00067544940599999995
H[16] <- -0.000117476784
filter.name <- c("Daub cmpct on ext. phase N=8")
}
else if(filter.number == 9) {
H <- rep(0, 18)
H[1] <- 0.038077947363999998
H[2] <- 0.24383467461300001
H[3] <- 0.60482312369000002
H[4] <- 0.65728807805099998
H[5] <- 0.13319738582499999
H[6] <- -0.293273783279
H[7] <- -0.096840783222999993
H[8] <- 0.14854074933799999
H[9] <- 0.030725681479000001
H[10] <- -0.067632829061000002
H[11] <- 0.00025094711499999998
H[12] <- 0.022361662123999999
H[13] <- -0.0047232047580000004
H[14] <- -0.0042815036819999997
H[15] <- 0.001847646883
H[16] <- 0.00023038576400000001
H[17] <- -0.00025196318900000002
H[18] <- 3.9347320000000003e-05
filter.name <- c("Daub cmpct on ext. phase N=9")
}
else if(filter.number == 10) {
H <- rep(0, 20)
H[1] <- 0.026670057901000001
H[2] <- 0.188176800078
H[3] <- 0.52720118893199996
H[4] <- 0.688459039454
H[5] <- 0.28117234366100002
H[6] <- -0.24984642432699999
H[7] <- -0.19594627437699999
H[8] <- 0.127369340336
H[9] <- 0.093057364604000006
H[10] <- -0.071394147165999997
H[11] <- -0.029457536821999999
H[12] <- 0.033212674058999997
H[13] <- 0.0036065535670000001
H[14] <- -0.010733175483
H[15] <- 0.001395351747
H[16] <- 0.0019924052950000002
H[17] <- -0.00068585669500000003
H[18] <- -0.000116466855
H[19] <- 9.3588670000000005e-05
H[20] <- -1.3264203000000001e-05
filter.name <- c("Daub cmpct on ext. phase N=10")
}
else {
stop("Unknown filter number for Daubechies wavelets with extremal phase and highest number of vanishing moments..."
)
}
}
else if(family == "DaubLeAsymm") {
family <- "DaubLeAsymm" #
#
# The following wavelet coefficients are taken from
# Ten Lectures on Wavelets, Daubechies, I, 1992
# CBMS-NSF Regional Conference Series, page 198, Table 6.3
#
# Comment from that table reads:
# "The low pass filter coefficients for the "least-asymmetric"
# compactly supported wavelets with maximum number of
# vanishing moments, for N = 4 to 10
#
if(filter.number == 4) {
H <- rep(0, 8)
H[1] <- -0.107148901418
H[2] <- -0.041910965124999998
H[3] <- 0.703739068656
H[4] <- 1.1366582434079999
H[5] <- 0.42123453420399998
H[6] <- -0.14031762417900001
H[7] <- -0.017824701442000001
H[8] <- 0.045570345896000002
filter.name <- c("Daub cmpct on least asymm N=4")
H <- H/sqrt(2)
}
else if(filter.number == 5) {
H <- rep(0, 10)
H[1] <- 0.038654795955000001
H[2] <- 0.041746864421999999
H[3] <- -0.055344186116999997
H[4] <- 0.28199069685400002
H[5] <- 1.023052966894
H[6] <- 0.89658164837999998
H[7] <- 0.023478923136000002
H[8] <- -0.24795136261299999
H[9] <- -0.029842499868999998
H[10] <- 0.027632152957999999
filter.name <- c("Daub cmpct on least asymm N=5")
H <- H/sqrt(2)
}
else if(filter.number == 6) {
H <- rep(0, 12)
H[1] <- 0.021784700327000001
H[2] <- 0.0049366123720000002
H[3] <- -0.166863215412
H[4] <- -0.068323121587000005
H[5] <- 0.69445797295800005
H[6] <- 1.113892783926
H[7] <- 0.47790437133300001
H[8] <- -0.102724969862
H[9] <- -0.029783751298999999
H[10] <- 0.063250562659999995
H[11] <- 0.002499922093
H[12] <- -0.011031867508999999
filter.name <- c("Daub cmpct on least asymm N=6")
H <- H/sqrt(2)
}
else if(filter.number == 7) {
H <- rep(0, 14)
H[1] <- 0.003792658534
H[2] <- -0.0014812259150000001
H[3] <- -0.017870431651
H[4] <- 0.043155452582000001
H[5] <- 0.096014767936000001
H[6] <- -0.070078291222000003
H[7] <- 0.024665659489
H[8] <- 0.75816260196399998
H[9] <- 1.085782709814
H[10] <- 0.408183939725
H[11] <- -0.19805670680699999
H[12] <- -0.152463871896
H[13] <- 0.0056713426860000001
H[14] <- 0.014521394762
filter.name <- c("Daub cmpct on least asymm N=7")
H <- H/sqrt(2)
}
else if(filter.number == 8) {
H <- rep(0, 16)
H[1] <- 0.0026727933929999999
H[2] <- -0.00042839430000000001
H[3] <- -0.021145686528000002
H[4] <- 0.0053863887540000002
H[5] <- 0.069490465910999999
H[6] <- -0.038493521263
H[7] <- -0.073462508760999995
H[8] <- 0.515398670374
H[9] <- 1.0991066305370001
H[10] <- 0.68074534719000002
H[11] <- -0.086653615406000001
H[12] <- -0.20264865528600001
H[13] <- 0.010758611751
H[14] <- 0.044823623042000001
H[15] <- -0.00076669089599999999
H[16] <- -0.0047834585119999997
filter.name <- c("Daub cmpct on least asymm N=8")
H <- H/sqrt(2)
}
else if(filter.number == 9) {
H <- rep(0, 18)
H[1] <- 0.0015124873089999999
H[2] <- -0.00066914150899999997
H[3] <- -0.014515578552999999
H[4] <- 0.012528896241999999
H[5] <- 0.087791251553999999
H[6] <- -0.025786445929999999
H[7] <- -0.27089378350299997
H[8] <- 0.049882830959
H[9] <- 0.87304840734900002
H[10] <- 1.015259790832
H[11] <- 0.33765892360200001
H[12] <- -0.077172161096999994
H[13] <- 0.00082514092900000001
H[14] <- 0.042744433601999997
H[15] <- -0.016303351226000001
H[16] <- -0.018769396835999999
H[17] <- 0.00087650253900000005
H[18] <- 0.0019811937360000001
filter.name <- c("Daub cmpct on least asymm N=9")
H <- H/sqrt(2)
}
else if(filter.number == 10) {
H <- rep(0, 20)
H[1] <- 0.0010891704469999999
H[2] <- 0.00013524502000000001
H[3] <- -0.01222064263
H[4] <- -0.002072363923
H[5] <- 0.064950924578999994
H[6] <- 0.016418869425999998
H[7] <- -0.22555897223400001
H[8] <- -0.100240215031
H[9] <- 0.66707133815399999
H[10] <- 1.0882515305
H[11] <- 0.54281301121299996
H[12] <- -0.050256540092
H[13] <- -0.045240772217999999
H[14] <- 0.070703567549999999
H[15] <- 0.0081528167990000001
H[16] <- -0.028786231926000001
H[17] <- -0.0011375353139999999
H[18] <- 0.0064957283749999999
H[19] <- 8.0661204000000004e-05
H[20] <- -0.000649589896
filter.name <- c("Daub cmpct on least asymm N=10")
H <- H/sqrt(2)
}
else {
stop("Unknown filter number for Daubechies wavelets with\n least asymmetry and highest number of vanishing moments..."
)
}
}
else {
stop("Unknown filter\n")
}
H <- H/constant
return(list(H = H, name = filter.name, family = family, filter.number
= filter.number))
}
"first.last"<-
function(LengthH, DataLength, bc = "periodic")
{
levels <- log(DataLength)/log(2)
first.last.c <- matrix(0, nrow = levels + 1, ncol = 3, dimnames = list(
NULL, c("First", "Last", "Offset")))
first.last.d <- matrix(0, nrow = levels, ncol = 3, dimnames = list(NULL,
c("First", "Last", "Offset")))
if(bc == "periodic") {
# Periodic boundary correction
first.last.c[, 1] <- rep(0, levels + 1)
first.last.c[, 2] <- 2^(0:levels) - 1
first.last.c[, 3] <- rev(c(0, cumsum(rev(1 + first.last.c[, 2])
)[1:levels]))
first.last.d[, 1] <- rep(0, levels)
first.last.d[, 2] <- 2^(0:(levels - 1)) - 1
first.last.d[, 3] <- rev(c(0, cumsum(rev(1 + first.last.d[, 2])
)[1:(levels - 1)]))
ntotal <- 2 * DataLength - 1
ntotal.d <- DataLength - 1
}
else if(bc == "symmetric") {
# Symmetric boundary reflection
first.last.c[levels + 1, 1] <- 0
first.last.c[levels + 1, 2] <- DataLength - 1
first.last.c[levels + 1, 3] <- 0
ntotal <- first.last.c[levels + 1, 2] - first.last.c[levels + 1,
1] + 1
ntotal.d <- 0
for(i in levels:1) {
first.last.c[i, 1] <- trunc(0.5 * (1 - LengthH +
first.last.c[i + 1, 1]))
first.last.c[i, 2] <- trunc(0.5 * first.last.c[i + 1, 2
])
first.last.c[i, 3] <- first.last.c[i + 1, 3] +
first.last.c[i + 1, 2] - first.last.c[i + 1, 1] +
1
first.last.d[i, 1] <- trunc(0.5 * (first.last.c[i + 1,
1] - 1))
first.last.d[i, 2] <- trunc(0.5 * (first.last.c[i + 1,
2] + LengthH - 2))
if(i != levels) {
first.last.d[i, 3] <- first.last.d[i + 1, 3] +
first.last.d[i + 1, 2] - first.last.d[i + 1,
1] + 1
}
ntotal <- ntotal + first.last.c[i, 2] - first.last.c[i,
1] + 1
ntotal.d <- ntotal.d + first.last.d[i, 2] -
first.last.d[i, 1] + 1
}
}
else {
stop("Unknown boundary correction method")
}
names(ntotal) <- NULL
names(ntotal.d) <- NULL
list(first.last.c = first.last.c, ntotal = ntotal, first.last.d =
first.last.d, ntotal.d = ntotal.d)
}
"imwd"<-
function(image, filter.number = 2, bc = "periodic", verbose = F)
{
if(verbose == T)
cat("Argument checking...")
if(nrow(image) != ncol(image))
stop("Number of rows and columns in image are not identical")
if(verbose == T)
cat("...done\nFilter...")
filter <- filter.select(filter.number) # Select wavelet filter
Csize <- nrow(image) #
#
# Check that Csize is a power of 2
#
nlevels <- log(Csize)/log(2)
if(round(nlevels) != nlevels) stop("The image size is not a power of 2"
) #
#
# Set-up first/last database
#
if(verbose == T)
cat("...selected\nFirst/last database...")
fl.dbase <- first.last(LengthH = length(filter$H), DataLength = Csize,
bc = bc) #
first.last.c <- fl.dbase$first.last.c
first.last.d <- fl.dbase$first.last.d #
#
# Set up answer list
#
image.decomp <- list(nlevels = nlevels, fl.dbase = fl.dbase, filter =
filter) #
# We used to return the original image as well, but this is huge.
# image.decomp <- c(image.decomp, list(original = as.vector(image))) #
#
# Load the object code
#
if(verbose == T)
cat("...built\nObject code...")
maybe.load() #
#
# Ok, go round loop doing decompositions
#
if(verbose == T)
cat("...loaded\n")
nbc <- switch(bc,
periodic = 1,
symmetric = 2)
if(is.null(nbc))
stop("Unknown boundary handling")
smoothed <- as.vector(image)
if(verbose == T) {
cat(bc, " boundary handling\n")
cat("Decomposing...")
}
for(level in seq(nrow(first.last.d), 1, -1)) {
if(verbose == T)
cat(level - 1, "")
LengthCin <- first.last.c[level + 1, 2] - first.last.c[level +
1, 1] + 1
LengthCout <- first.last.c[level, 2] - first.last.c[level, 1] +
1
LengthDout <- first.last.d[level, 2] - first.last.d[level, 1] +
1
ImCC <- rep(0, (LengthCout * LengthCout))
ImCD <- rep(0, (LengthCout * LengthDout))
ImDC <- rep(0, (LengthDout * LengthCout))
ImDD <- rep(0, (LengthDout * LengthDout))
error <- 0
z <- .C("StoIDS",
C = as.double(smoothed),
Csize = as.integer(LengthCin),
firstCin = as.integer(first.last.c[level + 1, 1]),
H = as.double(filter$H),
LengthH = as.integer(length(filter$H)),
LengthCout = as.integer(LengthCout),
firstCout = as.integer(first.last.c[level, 1]),
lastCout = as.integer(first.last.c[level, 2]),
LengthDout = as.integer(LengthDout),
firstDout = as.integer(first.last.d[level, 1]),
lastDout = as.integer(first.last.d[level, 2]),
ImCC = as.double(ImCC),
ImCD = as.double(ImCD),
ImDC = as.double(ImDC),
ImDD = as.double(ImDD),
nbc = as.integer(nbc),
error = as.integer(error))
error <- z$error
if(error != 0) {
cat("Error was ", error, "\n")
stop("Error reported")
}
smoothed <- z$ImCC # cat("Assigning wavelet coefficients\n")
nm <- lt.to.name(level - 1, "CD")
image.decomp[[nm]] <- z$ImCD
nm <- lt.to.name(level - 1, "DC")
image.decomp[[nm]] <- z$ImDC
nm <- lt.to.name(level - 1, "DD")
image.decomp[[nm]] <- z$ImDD
}
if(verbose == T)
cat("\nReturning answer...\n")
image.decomp$w0Lconstant <- smoothed
image.decomp$bc <- bc
class(image.decomp) <- "imwd"
image.decomp
}
"imwr"<-
function(x, ...)
UseMethod("imwr")
"imwr.imwd"<-
function(imwd, bc = imwd$bc, verbose = F)
{
if(verbose == T) cat("Argument checking...") #
#
# Check class of imwd
#
ctmp <- class(imwd)
if(is.null(ctmp))
stop("imwd has no class")
else if(ctmp != "imwd")
stop("imwd is not of class imwd")
filter <- imwd$filter
if(verbose == T)
cat("...done\nFirst/last database...")
fl.dbase <- imwd$fl.dbase
first.last.c <- fl.dbase$first.last.c
first.last.d <- fl.dbase$first.last.d
if(verbose == T)
cat("...extracted\nObject code...")
ImCC <- imwd$w0Lconstant #
maybe.load()
if(verbose == T) cat("...loaded\nReconstructing...") #
#
# Ok, go round loop doing reconstructions
#
for(level in seq(2, 1 + imwd$nlevels)) {
if(verbose == T)
cat(level - 1, " ")
LengthCin <- first.last.c[level - 1, 2] - first.last.c[level -
1, 1] + 1
LengthCout <- first.last.c[level, 2] - first.last.c[level, 1] +
1
LengthDin <- first.last.d[level - 1, 2] - first.last.d[level -
1, 1] + 1
error <- 0
ImOut <- rep(0, LengthCout^2)
nbc <- switch(bc,
periodic = 1,
symmetric = 2)
if(is.null(nbc))
stop("Unknown boundary handling")
z <- .C("StoIRS",
ImCC = as.double(ImCC),
ImCD = as.double(imwd[[lt.to.name(level - 2, "CD")]]),
ImDC = as.double(imwd[[lt.to.name(level - 2, "DC")]]),
ImDD = as.double(imwd[[lt.to.name(level - 2, "DD")]]),
LengthCin = as.integer(LengthCin),
firstCin = as.integer(first.last.c[level - 1, 1]),
lastCin = as.integer(first.last.c[level - 1, 2]),
LengthDin = as.integer(LengthDin),
firstDin = as.integer(first.last.d[level - 1, 1]),
lastDin = as.integer(first.last.d[level - 1, 2]),
H = as.double(filter$H),
LengthH = as.integer(length(filter$H)),
LengthCout = as.integer(LengthCout),
firstCout = as.integer(first.last.c[level, 1]),
lastCout = as.integer(first.last.c[level, 2]),
ImOut = as.double(ImOut),
nbc = as.integer(nbc),
error = as.integer(error))
error <- z$error
if(error != 0) {
cat("Error was ", error, "\n")
stop("Error reported")
}
# Do something with ImOut
ImCC <- z$ImOut
}
if(verbose == T)
cat("\nReturning image\n") # Return the image
matrix(ImCC, nrow = 2^(imwd$nlevels))
}
"imwr.imwdc"<-
function(imwd, verbose = F, ...)
{
if(verbose == T)
cat("Uncompressing...\n")
imwd2 <- uncompress(imwd, ver = verbose)
if(verbose == T)
cat("Reconstructing...\n")
imwr(imwd2, verbose = verbose, ...)
}
"last.dump"<-
structure(.Data = list("No Frame Available", "help.start()" =
"No Frame Available", "paste(\"shelp\", gui.map(gui), sep = \"\")" =
"No Frame Available", "gui.map(gui)" = "No Frame Available",
"stop(\"default value for \\\"gui\\\" not set, set with:\\n \\\"options(gui=...)\\\"\")"
= "No Frame Available"), message =
"default value for \"gui\" not set, set with:\n \"options(gui=...)\"")
"lt.to.name"<-
function(level, type)
{
#
# This function converts the level and type (horizontal, vertical, diagonal)
# of wavelet coefficients to a character string "wnLx" which should be
# interpreted as "nth Level, coefficients x", where x is 1, 2 or 3 in the
# scheme of Mallat. (So 1 is horizontal, 2 is vertical and 3 is diagonal).
# w is on the front to indicate that these are wavelet coefficients
#
return(paste("w", as.character(level), "L", switch(type,
CD = "1",
DC = "2",
DD = "3"), sep = ""))
}
"maybe.load"<-
function()
if(!is.loaded(C.symbol("wavedecomp"))) dyn.load("YOURDIR/wd2d.o")
"plot.imwd"<-
function(imwd, scaling = "by.level", co.type = "abs", package = "SPlus",
plot.type = "mallat", arrangement = c(3, 3), transform = F, tfunction
= sqrt)
{
#
#
# Check class of imwd
#
if(package != "SPlus" && package != "S") stop("Unknown package")
ctmp <- class(imwd)
if(is.null(ctmp))
stop("imwd has no class")
else if(ctmp != "imwd")
stop("imwd is not of class imwd")
Csize <- 2^(imwd$nlevels)
m <- matrix(0, nrow = Csize, ncol = Csize)
first.last.d <- imwd$fl.dbase$first.last.d
first.last.c <- imwd$fl.dbase$first.last.c
if(plot.type == "mallat") {
for(level in (imwd$nlevels):1) {
ndata <- 2^(level - 1)
firstD <- first.last.d[level, 1]
lastD <- first.last.d[level, 2]
LengthD <- lastD - firstD + 1
sel <- seq(from = (1 - firstD), length = ndata) #
#
# Extract CD for this level
#
nm <- lt.to.name(level - 1, "CD")
msub1 <- matrix(imwd[[nm]], nrow = LengthD, ncol =
LengthD) #
#
# Extract DC for this level
#
nm <- lt.to.name(level - 1, "DC")
msub2 <- matrix(imwd[[nm]], nrow = LengthD, ncol =
LengthD) #
#
# Extract DD for this level
#
nm <- lt.to.name(level - 1, "DD")
msub3 <- matrix(imwd[[nm]], nrow = LengthD, ncol =
LengthD) #
#
#
# Work out if we want to display the absolute values or the actual
# values
#
if(co.type == "abs") {
msub1 <- abs(msub1)
msub2 <- abs(msub2)
msub3 <- abs(msub3)
}
else if(co.type == "mabs") {
msub1 <- - abs(msub1)
msub2 <- - abs(msub2)
msub3 <- - abs(msub3)
}
else if(co.type != "none")
stop("Unknown co.type")
if(transform == T) {
msub1 <- tfunction(msub1)
msub2 <- tfunction(msub2)
msub3 <- tfunction(msub3)
}
if(scaling == "by.level") {
r.m1 <- range(msub1)
r.m2 <- range(msub2)
r.m3 <- range(msub3)
mu1 <- 249/(r.m1[2] - r.m1[1])
mu2 <- 249/(r.m2[2] - r.m2[1])
mu3 <- 249/(r.m3[2] - r.m3[1])
msub1 <- mu1 * (msub1 - r.m1[1])
msub2 <- mu2 * (msub2 - r.m2[1])
msub3 <- mu3 * (msub3 - r.m3[1])
}
else {
range.msub <- range(c(msub1, msub2, msub3))
multiplier <- 255/(range.msub[2] - range.msub[1
])
msub1 <- multiplier * (msub1 - range.msub[1])
msub2 <- multiplier * (msub2 - range.msub[1])
msub3 <- multiplier * (msub3 - range.msub[1]) #
}
m[(ndata + 1):(2 * ndata), 1:ndata] <- msub1[sel, sel]
m[1:ndata, (ndata + 1):(2 * ndata)] <- msub2[sel, sel]
m[(ndata + 1):(2 * ndata), (ndata + 1):(2 * ndata)] <-
msub3[sel, sel]
}
if(package == "SPlus") {
image(m, xaxt = "n", yaxt = "n")
axis(1, at = c(0, 2^((imwd$nlevels - 3):(imwd$nlevels))
))
axis(2, at = c(0, 2^((imwd$nlevels - 3):(imwd$nlevels))
))
}
else return(m)
}
else if(plot.type == "cols") {
oldpar <- par(mfrow = arrangement, pty = "s")
for(level in (imwd$nlevels:1)) {
ndata <- 2^(level - 1)
firstD <- first.last.d[level, 1]
lastD <- first.last.d[level, 2]
LengthD <- lastD - firstD + 1
sel <- seq(from = (1 - firstD), length = ndata) #
#
# Extract CD for this level
#
nm <- lt.to.name(level - 1, "CD")
msub1 <- matrix(imwd[[nm]], nrow = LengthD, ncol =
LengthD) #
#
# Extract DC for this level
#
nm <- lt.to.name(level - 1, "DC")
msub2 <- matrix(imwd[[nm]], nrow = LengthD, ncol =
LengthD) #
#
# Extract DD for this level
#
nm <- lt.to.name(level - 1, "DD")
msub3 <- matrix(imwd[[nm]], nrow = LengthD, ncol =
LengthD) #
#
#
# Work out if we want to display the absolute values or the actual
# values
#
if(co.type == "abs") {
msub1 <- abs(msub1)
msub2 <- abs(msub2)
msub3 <- abs(msub3)
}
else if(co.type == "mabs") {
msub1 <- - abs(msub1)
msub2 <- - abs(msub2)
msub3 <- - abs(msub3)
}
else if(co.type != "none")
stop("Unknown co.type")
if(transform == T) {
msub1 <- tfunction(msub1)
msub2 <- tfunction(msub2)
msub3 <- tfunction(msub3)
}
if(package == "SPlus") {
xlabstr <- paste("Level", level - 1,
"(horizonatal)")
image(msub1, xlab = xlabstr)
xlabstr <- paste("Level", level - 1,
"(vertical)")
image(msub2, xlab = xlabstr)
xlabstr <- paste("Level", level - 1,
"(diagonal)")
image(msub3, xlab = xlabstr)
}
else {
warning("Not using SPlus")
}
}
par(oldpar)
}
else stop("Unknown plot.type")
}
"plot.imwdc"<-
function(imwdc, verbose = F, ...)
{
imwd <- uncompress(imwdc, verbose = verbose)
return(plot(imwd, ...))
}
"plot.wd"<-
function(wd, xlabels, first.level = 1, main =
"Wavelet Decomposition Coefficients", scaling = "by.level", rhlab = F,
sub, ...)
{
#
# Check class of wd
#
ctmp <- class(wd)
if(is.null(ctmp))
stop("wd has no class")
else if(ctmp != "wd")
stop("wd is not of class wd")
levels <- wd$nlevels
nlevels <- levels - first.level
n <- 2^(levels - 1)
if(missing(sub))
sub <- wd$filter$name
plot(c(0, 0, n, n), c(0, nlevels + 1, nlevels + 1, 0), type = "n", xlab
= "Translate", ylab = "Resolution Level", main = main, yaxt =
"n", xaxt = "n", sub = sub, ...)
axis(2, at = 1:(nlevels), labels = ((levels - 1):first.level))
if(missing(xlabels)) {
axx <- c(0, 2^(nlevels - 2), 2^(nlevels - 1), 2^(nlevels - 1) +
2^(nlevels - 2), 2^nlevels)
axis(1, at = axx)
}
else {
axx <- pretty(1:n, nint = 3)
if(axx[length(axx)] > n)
axx[length(axx)] <- n
axx[axx == 0] <- 1
axl <- signif(xlabels[axx], dig = 3)
axis(1, at = axx, labels = axl)
}
x <- 1:n
height <- 1
first.last.d <- wd$fl.dbase$first.last.d
axr <- NULL
if(scaling == "global") {
my <- 0
for(i in ((levels - 1):first.level)) {
y <- accessD(wd, i)
my <- max(c(my, abs(y)))
}
}
for(i in ((levels - 1):first.level)) {
n <- 2^i
y <- accessD(wd, i)
xplot <- x
ly <- length(y)
if(scaling == "by.level")
my <- max(abs(y))
y <- (0.5 * y)/my
axr <- c(axr, my)
segments(xplot, height, xplot, height + y)
if(i != first.level) {
x1 <- x[seq(1, n - 1, 2)]
x2 <- x[seq(2, n, 2)]
x <- (x1 + x2)/2
height <- height + 1
}
}
if(rhlab == T)
axis(4, at = 1:length(axr), labels = signif(axr, 3))
axr
}
"print.imwd"<-
function(x, ...)
{
cat("Class 'imwd' : Discrete Image Wavelet Transform Object:\n")
cat(" ~~~~ : List with", length(x), "components with names\n")
cat(" ", names(x), "\n\n")
cat("$ wNLx are LONG coefficient vectors !\n")
cat("\nsummary(.):\n----------\n")
summary.imwd(x)
}
"print.imwdc"<-
function(x, ...)
{
cat("Class 'imwdc' : Compressed Discrete Image Wavelet Transform Object:\n"
)
cat(" ~~~~~ : List with", length(x), "components with names\n")
cat(" ", names(x), "\n\n")
cat("$ wNLx are LONG coefficient vectors !\n")
cat("\nsummary(.):\n----------\n")
summary.imwdc(x)
}
"print.wd"<-
function(x, ...)
{
cat("Class 'wd' : Discrete Wavelet Transform Object:\n")
cat(" ~~ : List with", length(x), "components with names\n")
cat(" ", names(x), "\n\n")
cat("$ C and $ D are LONG coefficient vectors !\n")
cat("\nsummary(.):\n----------\n")
summary.wd(x)
}
"putC"<-
function(wd, level, v, boundary = F)
{
if(is.null(class(wd)))
stop("wd is not class wd object")
if(class(wd) != "wd")
stop("wd is not class wd object")
if(level < 0)
stop("level too small")
else if(level > wd$nlevels)
stop("level too big")
flc <- wd$fl.dbase$first.last.c[level + 1, ]
if(boundary == F) {
n <- 2^level
i1 <- flc[3] + 1 - flc[1]
i2 <- flc[3] + n - flc[1]
}
else {
n <- flc[2] - flc[1] + 1
i1 <- flc[3] + 1
i2 <- flc[3] + n
}
if(length(v) != n)
stop("I think that the length of v is wrong")
wd$C[i1:i2] <- v
wd
}
"putD"<-
function(wd, level, v, boundary = F)
{
if(is.null(class(wd)))
stop("wd is not class wd object")
if(class(wd) != "wd")
stop("wd is not class wd object")
if(level < 0)
stop("level too small")
else if(level >= wd$nlevels)
stop("level too big")
fld <- wd$fl.dbase$first.last.d[level + 1, ]
if(boundary == F) {
n <- 2^level
i1 <- fld[3] + 1 - fld[1]
i2 <- fld[3] + n - fld[1]
}
else {
n <- fld[2] - fld[1] + 1
i1 <- fld[3] + 1
i2 <- fld[3] + n
}
if(length(v) != n)
stop("I think that the length of v is wrong")
wd$D[i1:i2] <- v
wd
}
"summary.imwd"<-
function(imwd)
{
#
#
# Check class of imwd
#
ctmp <- class(imwd)
if(is.null(ctmp))
stop("imwd has no class")
else if(ctmp != "imwd")
stop("imwd is not of class imwd")
first.last.c <- imwd$fl.dbase$first.last.c
pix <- first.last.c[imwd$nlevels + 1, 2] - first.last.c[imwd$nlevels +
1, 1] + 1
cat("UNcompressed image wavelet decomposition structure\n")
cat("Levels: ", imwd$nlevels, "\n")
cat("Original image was", pix, "x", pix, " pixels.\n")
cat("Filter was: ", imwd$filter$name, "\n")
cat("Boundary handling: ", imwd$bc, "\n")
}
"summary.imwdc"<-
function(imwdc)
{
#
#
# Check class of imwdc
#
ctmp <- class(imwdc)
if(is.null(ctmp))
stop("imwdc has no class")
else if(ctmp != "imwdc")
stop("imwdc is not of class imwdc")
first.last.c <- imwdc$fl.dbase$first.last.c
pix <- first.last.c[imwdc$nlevels + 1, 2] - first.last.c[imwdc$nlevels +
1, 1] + 1
cat("Compressed image wavelet decomposition structure\n")
cat("Levels: ", imwdc$nlevels, "\n")
cat("Original image was", pix, "x", pix, " pixels.\n")
cat("Filter was: ", imwdc$filter$name, "\n")
cat("Boundary handling: ", imwdc$bc, "\n")
}
"summary.wd"<-
function(wd)
{
pix <- length(accessC(wd))
cat("Levels: ", wd$nlevels, "\n")
cat("Length of original: ", pix, "\n")
cat("Filter was: ", wd$filter$name, "\n")
cat("Boundary handling: ", wd$bc, "\n")
}
"support"<-
function(filter.number = 2, family = "DaubExPhase", m = 0, n = 0)
{
m <- m + 1
if(family == "DaubExPhase") {
a <- - (filter.number - 1)
b <- filter.number
lh <- 2^( + m) * (a + n)
rh <- 2^( + m) * (b + n)
return(list(lh = lh, rh = rh, psi.lh = - (filter.number - 1),
psi.rh = filter.number, phi.lh = 0, phi.rh = 2 *
filter.number - 1))
}
else if(family == "DaubLeAsymm") {
a <- - (filter.number - 1)
b <- filter.number
lh <- 2^( + m) * (a + n)
rh <- 2^( + m) * (b + n)
return(list(lh = lh, rh = rh, psi.lh = - (filter.number - 1),
psi.rh = filter.number, phi.lh = 0, phi.rh = 2 *
filter.number - 1))
}
else {
stop("Unknown family")
}
}
"threshold"<-
function(x, ...)
UseMethod("threshold")
"threshold.imwd"<-
function(imwd, levels = 3:(imwd$nlevels - 1), type = "hard", policy =
"universal", by.level = F, value = 0, dev = var, verbose = F,
return.threshold = F, compression = T)
{
#
#
# Check class of imwd
#
if(verbose == T) cat("Argument checking\n")
ctmp <- class(imwd)
if(is.null(ctmp))
stop("imwd has no class")
else if(ctmp != "imwd")
stop("imwd is not of class imwd")
if(policy != "universal" && policy != "manual" && policy !=
"probability")
stop("Only policys are universal, manual and probability at present"
)
if(type != "hard" && type != "soft")
stop("Only hard or soft thresholding at present")
r <- range(levels)
if(r[1] < 0)
stop("levels out of range, level too small")
if(r[2] > imwd$nlevels - 1)
stop("levels out of range, level too big")
if(r[1] > imwd$nlevels - 1) {
warning("no thresholding done")
return(imwd)
}
if(r[2] < 0) {
warning("no thresholding done")
return(imwd)
}
nthresh <- length(levels)
d <- NULL
n <- 2^(2 * imwd$nlevels) #
# Decide which policy to adopt
# The next if-else construction should define a vector called
# "thresh" that contains the threshold value for each level
# in "levels". This may be the same threshold value
# a global threshold.
#
if(policy == "universal") {
if(verbose == T)
cat("Universal policy...")
if(by.level == F) {
if(verbose == T)
cat("All levels at once\n")
for(i in 1:nthresh) {
d <- c(d, imwd[[lt.to.name(levels[i], "CD")]],
imwd[[lt.to.name(levels[i], "DC")]], imwd[[
lt.to.name(levels[i], "DD")]])
}
noise.level <- sqrt(dev(d))
thresh <- sqrt(2 * log(n)) * noise.level
if(verbose == T)
cat("Global threshold is: ", thresh, "\n")
thresh <- rep(thresh, length = nthresh)
}
else {
if(verbose == T)
cat("Level by level\n")
thresh <- rep(0, length = nthresh)
for(i in 1:nthresh) {
d <- c(imwd[[lt.to.name(levels[i], "CD")]],
imwd[[lt.to.name(levels[i], "DC")]], imwd[[
lt.to.name(levels[i], "DD")]])
noise.level <- sqrt(dev(d))
thresh[i] <- sqrt(2 * log(n)) * noise.level
if(verbose == T)
cat("Threshold for level: ", levels[i],
" is ", thresh[i], "\n")
}
}
}
else if(policy == "manual") {
if(verbose == T)
cat("Manual policy...\n")
thresh <- rep(value, length = nthresh)
if(length(value) != 1 && length(value) != nthresh)
warning("your threshold is not the same length as number of levels"
)
}
else if(policy == "probability") {
if(verbose == T)
cat("Probability policy...")
if(by.level == F) {
if(verbose == T)
cat("All levels at once\n")
for(i in 1:nthresh) {
d <- c(d, imwd[[lt.to.name(levels[i], "CD")]],
imwd[[lt.to.name(levels[i], "DC")]], imwd[[
lt.to.name(levels[i], "DD")]])
}
if(length(value) != 1)
stop("Length of value should be 1")
thresh <- rep(quantile(abs(d), prob = value), length =
nthresh)
if(verbose == T)
cat("Global threshold is: ", thresh[1], "\n")
}
else {
if(verbose == T)
cat("Level by level\n")
thresh <- rep(0, length = nthresh)
if(length(value) == 1)
value <- rep(value, nthresh)
if(length(value) != nthresh)
stop("Wrong number of probability values")
for(i in 1:nthresh) {
d <- c(imwd[[lt.to.name(levels[i], "CD")]],
imwd[[lt.to.name(levels[i], "DC")]], imwd[[
lt.to.name(levels[i], "DD")]])
thresh[i] <- quantile(abs(d), prob = value[i])
if(verbose == T)
cat("Threshold for level: ", levels[i],
" is ", thresh[i], "\n")
}
}
}
if(return.threshold == T)
return(thresh)
for(i in 1:nthresh) {
dCD <- imwd[[lt.to.name(levels[i], "CD")]]
dDC <- imwd[[lt.to.name(levels[i], "DC")]]
dDD <- imwd[[lt.to.name(levels[i], "DD")]]
if(type == "hard") {
dCD[abs(dCD) <= thresh[i]] <- 0
dDC[abs(dDC) <= thresh[i]] <- 0
dDD[abs(dDD) <= thresh[i]] <- 0
if(verbose == T) {
cat("Level: ", levels[i], " there are ", sum(
dCD == 0), ":", sum(dDC == 0), ":", sum(dDD ==
0), " zeroes and: ")
cat(sum(dCD != 0), ":", sum(dDC != 0), ":", sum(
dDD != 0), " nonzeroes\n")
}
}
else if(type == "soft") {
dCD <- sign(dCD) * (abs(dCD) - thresh[i]) * (abs(dCD) >
thresh[i])
dDC <- sign(dDC) * (abs(dDC) - thresh[i]) * (abs(dDC) >
thresh[i])
dDD <- sign(dDD) * (abs(dDD) - thresh[i]) * (abs(dDD) >
thresh[i])
if(verbose == T) {
cat("Level: ", levels[i], " there are ", sum(
dCD == 0), ":", sum(dDC == 0), ":", sum(dDD ==
0), " zeroes and: ")
cat(sum(dCD != 0), ":", sum(dDC != 0), ":", sum(
dDD != 0), " nonzeroes\n")
}
}
imwd[[lt.to.name(levels[i], "CD")]] <- dCD
imwd[[lt.to.name(levels[i], "DC")]] <- dDC
imwd[[lt.to.name(levels[i], "DD")]] <- dDD
}
imwd
if(compression == T)
return(compress(imwd, verbose = verbose))
else return(imwd)
}
"threshold.imwdc"<-
function(imwdc, verbose = F, ...)
{
warning("You are probably thresholding an already thresholded object")
imwd <- uncompress(imwdc, verbose = verbose)
return(threshold(imwd, verbose = T, ...))
}
"threshold.wd"<-
function(wd, levels = 3:(wd$nlevels - 1), type = "hard", policy = "universal",
by.level = F, value = 0, dev = var, boundary = F, verbose = F,
return.threshold = F)
{
#
# Check class of wd
#
if(verbose == T) cat("Argument checking\n")
ctmp <- class(wd)
if(is.null(ctmp))
stop("wd has no class")
else if(ctmp != "wd")
stop("wd is not of class wd")
if(policy != "universal" && policy != "manual" && policy !=
"probability")
stop("Only policys are universal, manual and probability at present"
)
if(type != "hard" && type != "soft")
stop("Only hard or soft thresholding at present")
r <- range(levels)
if(r[1] < 0)
stop("levels out of range, level too small")
if(r[2] > wd$nlevels - 1)
stop("levels out of range, level too big")
if(r[1] > wd$nlevels - 1) {
warning("no thresholding done")
return(wd)
}
if(r[2] < 0) {
warning("no thresholding done")
return(wd)
}
d <- NULL
n <- 2^wd$nlevels
nthresh <- length(levels) #
#
# Decide which policy to adopt
# The next if-else construction should define a vector called
# "thresh" that contains the threshold value for each level
# in "levels". This may be the same threshold value
# a global threshold.
#
if(policy == "universal") {
#
#
# Donoho and Johnstone's universal policy
#
if(verbose == T) cat("Universal policy...")
if(by.level == F) {
if(verbose == T)
cat("All levels at once\n")
for(i in 1:nthresh)
d <- c(d, accessD(wd, level = levels[i],
boundary = boundary))
noise.level <- sqrt(dev(d))
thresh <- sqrt(2 * log(n)) * noise.level
if(verbose == T)
cat("Global threshold is: ", thresh, "\n")
thresh <- rep(thresh, length = nthresh)
}
else {
if(verbose == T)
cat("Level by level\n")
thresh <- rep(0, length = nthresh)
for(i in 1:nthresh) {
d <- accessD(wd, level = levels[i], boundary =
boundary)
noise.level <- sqrt(dev(d))
thresh[i] <- sqrt(2 * log(n)) * noise.level
if(verbose == T)
cat("Threshold for level: ", levels[i],
" is ", thresh[i], "\n")
}
}
}
else if(policy == "manual") {
#
#
# User supplied threshold policy
#
if(verbose == T) cat("Manual policy\n")
thresh <- rep(value, length = nthresh)
if(length(value) != 1 && length(value) != nthresh)
warning("your threshold is not the same length as number of levels"
)
}
else if(policy == "probability") {
#
#
# Threshold is quantile based
#
if(verbose == T) cat("Probability policy...")
if(by.level == F) {
if(verbose == T)
cat("All levels at once\n")
for(i in 1:nthresh)
d <- c(d, accessD(wd, level = levels[i],
boundary = boundary))
if(length(value) != 1)
stop("Length of value should be 1")
thresh <- rep(quantile(abs(d), prob = value), length =
nthresh)
if(verbose == T)
cat("Global threshold is: ", thresh[1], "\n")
}
else {
if(verbose == T)
cat("Level by level\n")
thresh <- rep(0, length = nthresh)
if(length(value) == 1)
value <- rep(value, nthresh)
if(length(value) != nthresh)
stop("Wrong number of probability values")
for(i in 1:nthresh) {
d <- accessD(wd, level = levels[i], boundary =
boundary)
thresh[i] <- quantile(abs(d), prob = value[i])
if(verbose == T)
cat("Threshold for level: ", levels[i],
" is ", thresh[i], "\n")
}
}
}
if(return.threshold == T)
return(thresh)
for(i in 1:nthresh) {
d <- accessD(wd, level = levels[i], boundary = boundary)
if(type == "hard") {
d[abs(d) <= thresh[i]] <- 0
if(verbose == T)
cat("Level: ", levels[i], " there are ", sum(d ==
0), " zeroes\n")
}
else if(type == "soft")
d <- sign(d) * (abs(d) - thresh[i]) * (abs(d) > thresh[
i])
wd <- putD(wd, level = levels[i], v = d, boundary = boundary)
}
wd
}
"uncompress"<-
function(x, ...)
UseMethod("uncompress")
"uncompress.default"<-
function(v, verbose = F)
{
ctmp <- class(v)
if(is.null(ctmp)) {
stop("Object v has no class")
}
else if(ctmp == "uncompressed") {
if(verbose == T)
cat("Not compressed\n")
return(unclass(v$vector))
}
else if(ctmp == "compressed") {
answer <- rep(0, length = v$original.length)
answer[v$position] <- v$values
if(verbose == T)
cat("Uncompressed to length ", length(answer), "\n")
return(answer)
}
else stop("v has unknown class")
}
"uncompress.imwdc"<-
function(imwd, verbose = F)
{
if(verbose == T)
cat("Argument checking\n")
ctmp <- class(imwd)
if(is.null(ctmp))
stop("imwd has no class")
else if(ctmp != c("imwdc"))
stop("imwd is not of class imwdc")
unsquished <- list(nlevels = imwd$nlevels, fl.dbase = imwd$fl.dbase,
filter = imwd$filter, w0Lconstant = imwd$w0Lconstant, bc = imwd$
bc) #
#
# Go round loop compressing each set of coefficients
#
for(level in 0:(imwd$nlevels - 1)) {
if(verbose == T)
cat("Level ", level, "\n\t")
nm <- lt.to.name(level, "CD")
if(verbose == T)
cat("CD\t")
unsquished[[nm]] <- uncompress.default(imwd[[nm]], ver =
verbose)
nm <- lt.to.name(level, "DC")
if(verbose == T)
cat("\tDC\t")
unsquished[[nm]] <- uncompress.default(imwd[[nm]], ver =
verbose)
nm <- lt.to.name(level, "DD")
if(verbose == T)
cat("\tDD\t")
unsquished[[nm]] <- uncompress.default(imwd[[nm]], ver =
verbose)
}
class(unsquished) <- "imwd"
if(verbose == T)
cat("Overall inflation: Was: ", w <- object.size(imwd), " Now:",
s <- object.size(unsquished), " (", signif((100 * s)/w,
3), "%)\n")
unsquished
}
"wd"<-
function(data, filter.number = 2, family = "DaubExPhase", bc = "periodic",
verbose = F)
{
if(verbose == T)
cat("Argument checking...")
DataLength <- length(data) #
#
# Check that we have a power of 2 data elements
#
nlevels <- log(DataLength)/log(2)
if(round(nlevels) != nlevels) stop(
"The length of data is not a power of 2") #
#
# Select the appropriate filter
#
if(verbose == T)
cat("...done\nFilter...")
filter <- filter.select(filter.number = filter.number, family = family)
#
#
# Build the first/last database
#
if(verbose == T)
cat("...selected\nFirst/last database...")
fl.dbase <- first.last(LengthH = length(filter$H), DataLength =
DataLength, bc = bc) #
#
#
# Put in the data
#
C <- rep(0, fl.dbase$ntotal)
C[1:DataLength] <- data #
if(verbose == T)
error <- 1
else error <- 0
if(verbose == T)
cat("built\nObject code...")
maybe.load()
if(verbose == T) cat("...loaded\n") #
#
# Compute the decomposition
#
if(verbose == T)
cat("Decomposing...\n")
nbc <- switch(bc,
periodic = 1,
symmetric = 2)
if(is.null(nbc))
stop("Unknown boundary condition")
wavelet.decomposition <- .C("wavedecomp",
C = as.double(C),
LengthC = as.integer(fl.dbase$ntotal),
D = as.double(rep(0, fl.dbase$ntotal.d)),
LengthD = as.integer(fl.dbase$ntotal.d),
H = as.double(filter$H),
LengthH = as.integer(length(filter$H)),
nlevels = as.integer(nlevels),
firstC = as.integer(fl.dbase$first.last.c[, 1]),
lastC = as.integer(fl.dbase$first.last.c[, 2]),
offsetC = as.integer(fl.dbase$first.last.c[, 3]),
firstD = as.integer(fl.dbase$first.last.d[, 1]),
lastD = as.integer(fl.dbase$first.last.d[, 2]),
offsetD = as.integer(fl.dbase$first.last.d[, 3]),
nbc = as.integer(nbc),
error = as.integer(error))
if(verbose == T)
cat("done\n")
error <- wavelet.decomposition$error
if(error != 0) {
cat("Error ", error, " occured in wavedecomp\n")
stop("Error")
}
l <- list(C = wavelet.decomposition$C, D = wavelet.decomposition$D,
nlevels = wavelet.decomposition$nlevels, fl.dbase = fl.dbase,
filter = filter, bc = bc)
class(l) <- "wd"
l
}
"wdobjectdir"<-
"/files/goshawk/sd6h/gpn/projects/WAVELETS/CURRENT"
"wdobjectname"<-
"wd2d.o"
"wr"<-
function(wd, start.level = 0, verbose = F, bc = wd$bc, return.object = F,
filter.number = wd$filter$filter.number, family = wd$filter$family)
{
if(verbose == T) cat("Argument checking...") #
#
# Check class of wd
#
if(verbose == T)
cat("Argument checking\n")
ctmp <- class(wd)
if(is.null(ctmp))
stop("wd has no class")
else if(ctmp != "wd")
stop("wd is not of class wd")
if(start.level < 0)
stop("start.level must be nonnegative")
if(start.level >= wd$nlevels)
stop("start.level must be less than the number of levels")
if(is.null(wd$filter$filter.number))
stop("NULL filter.number for wd")
if(bc != wd$bc)
warning("Boundary handling is different to original")
filter <- filter.select(filter.number = filter.number, family = family)
LengthH <- length(filter$H) #
#
# Build the reconstruction first/last database
#
if(verbose == T)
cat("...done\nFirst/last database...")
r.first.last.c <- wd$fl.dbase$first.last.c[(start.level + 1):(wd$
nlevels + 1), ] #
ntotal <- r.first.last.c[1, 3] + r.first.last.c[1, 2] - r.first.last.c[
1, 1] + 1
names(ntotal) <- NULL
C <- accessC(wd, level = start.level, boundary = T)
C <- c(rep(0, length = (ntotal - length(C))), C)
nlevels <- wd$nlevels - start.level
error <- 0 #
#
# Load object code
#
if(verbose == T)
cat("...built\nObject code...")
maybe.load()
if(verbose == T) {
cat("...loaded\nReconstruction...")
error <- 1
}
nbc <- switch(bc,
periodic = 1,
symmetric = 2)
if(is.null(nbc))
stop("Unknown boundary handling")
wavelet.reconstruction <- .C("waverecons",
C = as.double(C),
LengthC = as.integer(length(C)),
D = as.double(wd$D),
LengthD = as.integer(length(wd$D)),
H = as.double(filter$H),
LengthH = as.integer(LengthH),
nlevels = as.integer(nlevels),
firstC = as.integer(r.first.last.c[, 1]),
lastC = as.integer(r.first.last.c[, 2]),
offsetC = as.integer(r.first.last.c[, 3]),
firstD = as.integer(wd$fl.dbase$first.last.d[, 1]),
lastD = as.integer(wd$fl.dbase$first.last.d[, 2]),
offsetD = as.integer(wd$fl.dbase$first.last.d[, 3]),
nbc = as.integer(nbc),
error = as.integer(error))
if(verbose == T)
cat("done\n")
error <- wavelet.reconstruction$error
if(error != 0) {
cat("Error code returned from waverecons: ", error, "\n")
stop("waverecons returned error")
}
fl.dbase <- list(first.last.c = r.first.last.c, ntotal =
wavelet.reconstruction$LengthC, first.last.d = wd$fl.dbase$
first.last.d, ntotal.d = wd$fl.dbase$ntotal.d)
l <- list(C = wavelet.reconstruction$C, D = wavelet.reconstruction$D,
fl.dbase = fl.dbase, nlevels = wavelet.reconstruction$nlevels,
filter = filter, bc = bc)
class(l) <- "wd"
if(return.object == T)
return(l)
else return(accessC(l))
stop("Shouldn't get here\n")
}
"wvrelease"<-
function()
{
cat("S wavelet software, release 2.2, installed\n")
cat("Copyright Guy Nason 1993\n")
}
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'StoIRS.c'" '(3711 characters)'
if test -f 'StoIRS.c'
then
echo shar: will not over-write existing file "'StoIRS.c'"
else
cat << \SHAR_EOF > 'StoIRS.c'
/* Access an element in an image */
#define ACCESS(image, size, i, j) *(image + (i)*(size) + (j))
#include <stdlib.h>	/* NOTE(review): header name was lost in archive transcription
			   (angle-bracket text stripped); stdlib.h declares the
			   malloc() used below -- confirm against the original. */
/*
 * StoIRS: S-language interface to one step of the 2D wavelet image
 * reconstruction.  All arguments arrive as pointers (the .C() calling
 * convention); the scalars are dereferenced and the real work is done by
 * ImageReconstructStep, which writes the reconstructed image into ImOut
 * and sets *error non-zero on failure.
 */
StoIRS(ImCC, ImCD, ImDC, ImDD,
LengthCin, firstCin, lastCin,
LengthDin, firstDin, lastDin,
H, LengthH,
LengthCout, firstCout, lastCout,
ImOut, bc, error)
double *ImCC,*ImCD,*ImDC,*ImDD;
long *LengthCin, *firstCin, *lastCin;
long *LengthDin, *firstDin, *lastDin;
double *H;
long *LengthH;
long *LengthCout, *firstCout, *lastCout;
double *ImOut;
long *bc;
long *error;
{
/* Clear the error flag before delegating. */
*error = 0l;
ImageReconstructStep(ImCC, ImCD, ImDC, ImDD,
*LengthCin, *firstCin, *lastCin,
*LengthDin, *firstDin, *lastDin,
H, *LengthH,
*LengthCout, *firstCout, *lastCout,
ImOut, bc,
error);
}
int ImageReconstructStep(ImCC, ImCD, ImDC, ImDD,
LengthCin, firstCin, lastCin,
LengthDin, firstDin, lastDin,
H, LengthH,
LengthCout, firstCout, lastCout,
ImOut,
bc,
error)
double *ImCC,*ImCD,*ImDC,*ImDD;
long LengthCin, firstCin, lastCin;
long LengthDin, firstDin, lastDin;
double *H;
long LengthH;
long LengthCout, firstCout, lastCout;
double *ImOut;
long *bc;
long *error;
{
register int i,j;
double *c_in;
double *d_in;
double *c_out;
double *toC;
double *toD;
/* Get memory for c_in and d_in */
if ((c_in = (double *)malloc((unsigned)LengthCin*sizeof(double)))==NULL) {
*error = 1l;
return;
}
if ((d_in = (double *)malloc((unsigned)LengthDin*sizeof(double)))==NULL) {
*error = 2l;
return;
}
if ((c_out = (double *)malloc((unsigned)LengthCout*sizeof(double)))==NULL) {
*error = 3l;
return;
}
if ((toC = (double *)malloc((unsigned)LengthCin*LengthCout*sizeof(double)))
== NULL) {
*error = 4l;
return;
}
/* Now apply C and D filters to CC and CD to obtain toC */
for(i=0; i 'conbar.c'
/*
* CONBAR: Does the reconstruction convolution
*/
#define ACCESSC(c, firstC, lengthC, ix, bc) *(c+reflect(((ix)-(firstC)),(lengthC), (bc)))
#define CEIL(i) ( ((i)>0) ? ( ((i)+1)/2):((i)/2) )
/*
 * conbar: one reconstruction step of Mallat's pyramid algorithm in 1D.
 * Combines the smooth coefficients c_in and detail coefficients d_in
 * of a coarser level into the smooth coefficients c_out of the next
 * finer level.  Out-of-range coefficient indices are mapped back into
 * range by the ACCESSC macro, which calls reflect() with the chosen
 * boundary-correction method bc (periodic or symmetric).
 */
int conbar(c_in, LengthCin, firstCin, lastCin,
d_in, LengthDin, firstDin, lastDin,
H, LengthH,
c_out, LengthCout, firstCout, lastCout, bc)
double *c_in;	/* smooth (father) coefficients at the coarser level */
int LengthCin;
int firstCin;
int lastCin; /* Code probably doesn't need this */
double *d_in;	/* detail (mother) coefficients at the coarser level */
int LengthDin;
int firstDin;
int lastDin;
double *H;	/* low-pass filter coefficients, length LengthH */
int LengthH;
double *c_out;	/* output: smooth coefficients at the finer level */
int LengthCout;
int firstCout; /* This determines summation over n */
int lastCout; /* and this does too */
int bc;	/* boundary handling, consumed by reflect() via ACCESSC */
{
register int n,k;
double sumC, sumD;
/* Compute each of the output C */
for(n=firstCout; n<=lastCout; ++n) {
/* We want n+1-LengthH <= 2*k to start off */
k = CEIL(n+1-LengthH);
sumC = 0.0;
/* Low-pass part: sum of H[n-2k] * c_in[k] for all k that keep the
 * filter index n-2k inside [0, LengthH-1] */
while( 2*k <= n ) {
sumC += *(H + n - 2*k) * ACCESSC(c_in, firstCin, LengthCin,
k, bc);
++k;
}
/* Now do D part */
k = CEIL(n-1);
sumD = 0.0;
/* High-pass part: sum of H[1+2k-n] * d_in[k]; the filter is read in
 * the mirrored order */
while( 2*k <= (LengthH +n -2) ) {
sumD += *(H+1+2*k-n) * ACCESSC(d_in, firstDin, LengthDin,
k, bc);
++k;
}
/* Quadrature-mirror sign: the detail contribution alternates sign
 * with the parity of n */
if (n & 1) /* n odd */
sumC -= sumD;
else
sumC += sumD;
ACCESSC(c_out, firstCout, LengthCout, n, bc) = sumC;
}
}
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'convolve.c'" '(3462 characters)'
if test -f 'convolve.c'
then
echo shar: will not over-write existing file "'convolve.c'"
else
cat << \SHAR_EOF > 'convolve.c'
/*
* CONVOLVE - Do filter H filter convolution with boundary
*/
#include
#include "wavelet.h"
/*
* ACCESSC handles negative accesses, as well as those that exceed the number
* of elements
*/
#define ACCESSC(c, firstC, lengthC, ix, bc) *(c+reflect(((ix)-(firstC)),(lengthC),(bc)))
int convolveC(c_in, LengthCin, firstCin, H, LengthH, c_out, LengthCout,
firstCout, lastCout, bc)
double *c_in; /* Input data */
int LengthCin; /* Length of this array */
double *H; /* Filter */
int LengthH; /* Length of filter */
double *c_out; /* Output data */
int LengthCout; /* Length of above array */
int firstCout; /* First index of C array */
int lastCout; /* Last index of C array */
int bc; /* Method of boundary correction PERIODIC, SYMMETRIC */
{
double sum;
register int k;
register int count_out;
register int m;
count_out = 0;
for(k=firstCout; k<=lastCout; ++k) {
sum = 0.0;
for(m=0; m= 0) && (n < lengthC))
return(n);
else if (n<0) {
if (bc==PERIODIC) {
/*
n = lengthC+n;
*/
n = n%lengthC + lengthC*((n%lengthC)!=0);
if (n < 0) {
fprintf(stderr, "reflect: access error (%d,%d)\n",
n,lengthC);
fprintf(stderr, "reflect: left info from right\n");
exit(2);
}
else
return(n);
}
else if (bc==SYMMETRIC) {
n = -1-n;
if (n >= lengthC) {
fprintf(stderr, "reflect: access error (%d,%d)\n",
n,lengthC);
exit(3);
}
else
return(n);
}
else {
fprintf(stderr, "reflect: Unknown boundary correction");
fprintf(stderr, " value of %d\n", bc);
exit(4);
}
}
else {
if (bc==PERIODIC) {
/*
printf("periodic extension, was %d (%d) now ",n,lengthC);
n = n - lengthC;
*/
n %= lengthC;
/*
printf("%d\n", n);
*/
if (n >= lengthC) {
fprintf(stderr, "reflect: access error (%d,%d)\n",
n,lengthC);
fprintf(stderr, "reflect: right info from left\n");
exit(5);
}
else
return(n);
}
else if (bc==SYMMETRIC) {
n = 2*lengthC - n - 1;
if (n<0) {
fprintf(stderr, "reflect: access error (%d,%d)\n",
n,lengthC);
exit(6);
}
else
return(n);
}
else {
fprintf(stderr, "reflect: Unknown boundary correction\n");
exit(7);
}
}
/* Safety */
fprintf(stderr, "reflect: SHOULD NOT HAVE REACHED THIS POINT\n");
exit(8);
}
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'wavedecomp.c'" '(2657 characters)'
if test -f 'wavedecomp.c'
then
echo shar: will not over-write existing file "'wavedecomp.c'"
else
cat << \SHAR_EOF > 'wavedecomp.c'
#include "wavelet.h"
#define ACCESSC(l,r) *(C + *(offsetC+(l)) + (r) - *(firstC+(l)))
#define ACCESSD(l,r) *(D + *(offsetD+(l)) + (r) - *(firstD+(l)))
/*
 * wavedecomp: full 1D discrete wavelet decomposition using Mallat's
 * pyramid algorithm.  Designed to be called from S via .C(), hence
 * every argument is a pointer.
 *
 * Protocol for *error: an incoming value of 1 requests verbose
 * progress messages; on exit it is 0 for success, non-zero on error.
 *
 * Starting from the finest level (*levels) the routine repeatedly
 * applies the smoothing filter (convolveC) and the detail filter
 * (convolveD -- which presumably derives the high-pass filter from H;
 * see convolve.c) to the C coefficients of the level above, writing
 * the results into the C and D arrays at the level below.  The
 * first/last/offset arrays describe where each level's coefficients
 * live inside the flat C and D vectors.
 */
wavedecomp(C, LengthC, D, LengthD, H, LengthH, levels, firstC,lastC,
offsetC, firstD, lastD, offsetD, bc, error)
double *C;	/* Input data, and the subsequent smoothed data */
long *LengthC;	/* Length of C array */
double *D;	/* The wavelet coefficients */
long *LengthD;	/* Length of D array */
double *H;	/* The smoothing filter H */
long *LengthH;	/* Length of smoothing filter */
long *levels;	/* The number of levels in this decomposition */
long *firstC;	/* The first possible C coef at a given level */
long *lastC;	/* The last possible C coef at a given level */
long *offsetC;	/* Offset from C[0] for certain level's coeffs */
long *firstD;	/* The first possible D coef at a given level */
long *lastD;	/* The last possible D coef at a given level */
long *offsetD;	/* Offset from D[0] for certain level's coeffs */
long *bc;	/* Method of boundary correction */
long *error;	/* In: 1 requests verbose output.  Out: error code, 0 == OK */
{
	register int next_level, at_level;	/* unused local `k' removed */
	register int verbose;	/* Controls message printing, passed in error var */

	verbose = (*error == 1l);
	if (verbose) {
		if (*bc == PERIODIC)
			printf("Periodic boundary method\n");
		else if (*bc == SYMMETRIC)
			printf("Symmetric boundary method\n");
		else {
			printf("Unknown boundary correction method\n");
			*error = 1;
			return;
		}
		printf("Decomposing into level: ");
	}
	*error = 0l;
	for (next_level = *levels - 1; next_level >= 0; --next_level) {
		if (verbose)
			printf("%d ", next_level);
		at_level = next_level + 1;
		/* Smooth: C[at_level] filtered with H -> C[next_level] */
		convolveC( (C+*(offsetC+at_level)),
			(int)(*(lastC+ at_level) - *(firstC+at_level)+1),
			(int)(*(firstC+at_level)),
			H,
			(int)*LengthH,
			(C+*(offsetC+next_level)),
			(int)(*(lastC+next_level) - *(firstC+next_level)+1),
			(int)(*(firstC+next_level)),
			(int)(*(lastC+next_level)) , (int)*bc);
		/* Detail: C[at_level] -> D[next_level].
		 * BUG FIX: the output length was previously computed as
		 * lastD - lastD + 1 (always 1); it must be
		 * lastD - firstD + 1, mirroring the convolveC call above. */
		convolveD( (C+*(offsetC+at_level)),
			(int)(*(lastC+ at_level) - *(firstC+at_level)+1),
			(int)(*(firstC+at_level)),
			H,
			(int)*LengthH,
			(D+*(offsetD+next_level)),
			(int)(*(lastD+next_level) - *(firstD+next_level)+1),
			(int)(*(firstD+next_level)),
			(int)(*(lastD+next_level)), (int)*bc );
	}
	if (verbose)
		printf("\n");
	return(0);
}
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'wavelet.h'" '(55 characters)'
if test -f 'wavelet.h'
then
echo shar: will not over-write existing file "'wavelet.h'"
else
cat << \SHAR_EOF > 'wavelet.h'
#define PERIODIC 1
#define SYMMETRIC 2
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'waverecons.c'" '(2543 characters)'
if test -f 'waverecons.c'
then
echo shar: will not over-write existing file "'waverecons.c'"
else
cat << \SHAR_EOF > 'waverecons.c'
/*
* waverecons: Do 1D wavelet reconstruction
*/
#include "wavelet.h"
#define ACCESSC(l,r) *(C + *(offsetC+(l)) + (r) - *(firstC+(l)))
#define ACCESSD(l,r) *(D + *(offsetD+(l)) + (r) - *(firstD+(l)))
/*
 * waverecons: invert a 1D wavelet decomposition (Mallat's pyramid
 * algorithm run upwards).  Designed to be called from S via .C(),
 * hence every argument is a pointer.
 *
 * Protocol for *error: an incoming value of 1 requests progress
 * messages; on exit it is 0 for success, 2 for an unrecognised
 * boundary-handling code (only detected in verbose mode, as before).
 */
waverecons(C, LengthC, D, LengthD, H, LengthH, levels,
firstC, lastC, offsetC, firstD, lastD, offsetD, bc, error)
double *C;	/* Input data, and the subsequent smoothed data */
long *LengthC;	/* Length of C array */
double *D;	/* The wavelet coefficients */
long *LengthD;	/* Length of D array */
double *H;	/* The smoothing filter H */
long *LengthH;	/* Length of smoothing filter */
long *levels;	/* The number of levels in this decomposition */
long *firstC;	/* The first possible C coef at a given level */
long *lastC;	/* The last possible C coef at a given level */
long *offsetC;	/* Offset from C[0] for certain level's coeffs */
long *firstD;	/* The first possible D coef at a given level */
long *lastD;	/* The last possible D coef at a given level */
long *offsetD;	/* Offset from D[0] for certain level's coeffs */
long *bc;	/* Which boundary handling are we doing */
long *error;	/* Error code */
{
	register int lev, below;
	register int chatty;	/* print progress messages? (flag arrives in *error) */

	chatty = (*error == 1l) ? 1 : 0;
	if (chatty) {
		switch ((int)*bc) {
		case PERIODIC:
			printf("Periodic boundary handling\n");
			break;
		case SYMMETRIC:
			printf("Symmetric boundary handling\n");
			break;
		default:
			printf("Unknown boundary handling\n");
			*error = 2l;
			return;
		}
		printf("Building level: ");
	}
	*error = 0l;
	/* Climb from the coarsest level: each conbar() step merges the
	 * smooth and detail coefficients of level (lev-1) into the
	 * smooth coefficients of level lev. */
	for (lev = 1; lev <= *levels; ++lev) {
		if (chatty)
			printf("%d ", lev);
		below = lev - 1;
		conbar( (C + *(offsetC + below)),
			(int)(*(lastC + below) - *(firstC + below) + 1),
			(int)(*(firstC + below)),
			(int)(*(lastC + below)),
			(D + *(offsetD + below)),
			(int)(*(lastD + below) - *(firstD + below) + 1),
			(int)(*(firstD + below)),
			(int)(*(lastD + below)),
			H,
			(int)*LengthH,
			(C + *(offsetC + lev)),
			(int)(*(lastC + lev) - *(firstC + lev) + 1),
			(int)(*(firstC + lev)),
			(int)(*(lastC + lev)),
			(int)(*bc) );
	}
	if (chatty)
		printf("\n");
	return(0);
}
SHAR_EOF
fi # end of overwriting check
# Create the S .Data directory if absent and descend into it; the data
# objects and .Help files that follow are unpacked inside it.
if test ! -d '.Data'
then
echo shar: creating directory "'.Data'"
mkdir '.Data'
fi
echo shar: entering directory "'.Data'"
cd '.Data'
echo shar: extracting "'.Last.value'" '(20 characters)'
if test -f '.Last.value'
then
echo shar: will not over-write existing file "'.Last.value'"
else
cat << \SHAR_EOF > '.Last.value'
S data
SHAR_EOF
fi # end of overwriting check
if test ! -d '.Help'
then
echo shar: creating directory "'.Help'"
mkdir '.Help'
fi
echo shar: entering directory "'.Help'"
cd '.Help'
echo shar: extracting "'accessC'" '(3438 characters)'
if test -f 'accessC'
then
echo shar: will not over-write existing file "'accessC'"
else
cat << \SHAR_EOF > 'accessC'
.BG
.FN accessC
.TL
Get smoothed data from wavelet structure.
.DN
The smoothed and original data from a wavelet decomposition structure
(returned from wd) are packed into a single vector in that structure.
This function extracts the data corresponding to a particular resolution level.
.CS
accessC(wd.structure, level=wd.structure$levels, boundary=F)
.RA
.AG wd.structure
Wavelet decomposition structure from which you wish to extract the
smoothed or original data if the structure is from a wavelet decomposition,
or the reconstructed data if the structure is from a wavelet reconstruction.
.OA
.AG level
The level that you wish to extract. By default, this is the level with
most detail (in the case of structures from a decomposition this is the
original data, in the case of structures from a reconstruction this is the
top-level reconstruction).
.AG boundary
If this argument is T then all of the boundary correction values
will be returned as well (note: the length of the returned vector
may not be a power of 2). If boundary is false, then just the
coefficients will be returned. If the decomposition (or reconstruction)
was done with periodic boundary conditions then this option has
no effect.
.RT
A vector of the extracted data.
.DT
The wd (wr) function produces a wavelet decomposition (reconstruction)
structure.
For decomposition, the top level contains the original data, and
subsequent lower levels contain the successively smoothed data.
So if there are 2^m original data points, there will be m+1 levels
indexed 0,1,...,m. So
> accessC(wd.structure, level=m)
pulls out the original data, as does
> accessC(wd.structure)
To get hold of lower levels just specify the level that you're interested
in, e.g.
> accessC(wd.structure, level=2)
Gets hold of the second level.
For reconstruction, the top level contains the ultimate step in the
Mallat pyramid reconstruction algorithm, lower levels are intermediate
steps.
The need for this function is a consequence of the pyramidal structure
of Mallat's algorithm and the memory efficiency gain achieved by
storing the pyramid as a linear vector. AccessC obtains information about
where the smoothed data appears from the fl.dbase component of
wd.structure, in particular the array fl.dbase$first.last.c which
gives a complete specification of index numbers and offsets for
wd.structure$C.
Note that this and the accessD function only work with objects of class
'wd'.
Note also that this function only gets information from 'wd' class
objects. To put coefficients etc. into `wd' structures you have to
use the "putC" function.
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SH REFERENCES
Any book on wavelets, especially
.sp
Chui, C. K. (1992)
.ul
An Introduction to Wavelets.
Academic Press, London.
.sp
Daubechies, I. (1988)
.ul
Orthonormal bases of compactly supported wavelets
Communications on Pure and Applied Mathematics, Vol. 41, 909-996
.sp
Mallat, S. G. (1989)
.ul
A theory for multiresolution signal decomposition: the wavelet representation
IEEE Transactions on Pattern Analysis and Machine Intelligence.
Vol. 11, Number 7 674-693.
.SA
`wr', `wd', `accessD', `filter.select', `plot.coefs', `dyn.load'
`hard.threshold', `soft.threshold', `putC', `putD', `draw'
.EX
#
# Get the 3rd level of smoothed data from a decomposition
#
> accessC(wd(data), level=3)
#
# Plot the time series from a reconstruction
#
> tsplot(accessC(reconstruction))
.KW manip
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'accessD'" '(2565 characters)'
if test -f 'accessD'
then
echo shar: will not over-write existing file "'accessD'"
else
cat << \SHAR_EOF > 'accessD'
.BG
.FN accessD
.TL
Get wavelet expansion coefficients from wavelet structure.
.DN
The coefficients from a wavelet expansion in a wavelet decomposition structure
(returned from wd or wr) are packed into a single vector in that structure.
This function extracts the coefficients
corresponding to a particular resolution level.
.CS
accessD(wd.structure, level, boundary=F)
.RA
.AG wd.structure
Wavelet decomposition structure from which you wish to extract the
expansion coefficients.
.AG level
The level that you wish to extract. If the "original" data has
2^m data points then there are m possible levels that you could want
to access, indexed by 0,1,...,(m-1).
.AG boundary
If this argument is T then all of the boundary correction values
will be returned as well (note: the length of the returned vector
may not be a power of 2). If boundary is false, then just the
coefficients will be returned. If the decomposition (or reconstruction)
was done using periodic boundary handling then this option has no
effect.
.RT
A vector of the extracted coefficients.
.DT
The wd (wr) function produces a wavelet decomposition (reconstruction)
structure.
The need for this function is a consequence of the pyramidal structure
of Mallat's algorithm and the memory efficiency gain achieved by
storing the pyramid as a linear vector. AccessD obtains information about
where the coefficients appear from the fl.dbase component of
wd.structure, in particular the array fl.dbase$first.last.d which
gives a complete specification of index numbers and offsets for
wd.structure$D.
Note that this function and accessC only work on objects of class
`wd'. Also, you have to use putD to put wavelet coefficients into a
wd object.
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SH REFERENCES
Any book on wavelets, especially
.sp
Chui, C. K. (1992)
.ul
An Introduction to Wavelets.
Academic Press, London.
.sp
Daubechies, I. (1988)
.ul
Orthonormal bases of compactly supported wavelets
Communications on Pure and Applied Mathematics, Vol. 41, 909-996
.sp
Mallat, S. G. (1989)
.ul
A theory for multiresolution signal decomposition: the wavelet representation
IEEE Transactions on Pattern Analysis and Machine Intelligence.
Vol. 11, Number 7 674-693.
.SA
`wr', `wd', `accessC', `filter.select', `plot.coefs', `dyn.load'
`hard.threshold', `soft.threshold', `putC', `putD', `draw'
.EX
#
# Get the 3rd level coefficients of a decomposition
#
> accessD(wd(data), level=3)
#
# Do a qqnorm plot to assess the normality of some coefficients
#
> qqnorm(accessD(wd(data), level=8))
.KW manip
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'bwimage.colors'" '(782 characters)'
if test -f 'bwimage.colors'
then
echo shar: will not over-write existing file "'bwimage.colors'"
else
cat << \SHAR_EOF > 'bwimage.colors'
.BG D
.FN bwimage.colors
.TL
Sets up X11 colours to correspond to PostScript image prints (wavelet).
.PP
Sets a uniform grey scale for displaying images. Effectively installs
a colourmap for the X11 driver. There are 248 gray scales ranging from
white (S color 1) through to black (S color 248) with a uniform
grey scale in between. Ideally I would like 255 grey shades but
the X11 device does not install its own colour map. I don't think the
other graphics devices do either, but if you know different then
let me know.
.PP
You can change the number of grey scales by altering the 246 in the
first character vector to something else.
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.EX
#
# Fire up X11 device ready for displaying images
#
X11(bwimage.colors)
.KW sysdata
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'compress'" '(401 characters)'
if test -f 'compress'
then
echo shar: will not over-write existing file "'compress'"
else
cat << \SHAR_EOF > 'compress'
.BG
.FN compress
.TL
Compression - Generic function (wavelet)
.DN
Compresses object by removing zeroes
.GE
`imwd', `default'
.CS
compress(x, ...)
.RA
.AG x
an S-PLUS object
.OA
.AG ...
methods may have additional arguments.
.RT
a compressed version of the supplied argument
.DT
See individual help pages for operation
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SA
`threshold', `uncompress'
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'compress.default'" '(1622 characters)'
if test -f 'compress.default'
then
echo shar: will not over-write existing file "'compress.default'"
else
cat << \SHAR_EOF > 'compress.default'
.BG
.FN compress.default
.TL
Compress a vector (wavelet)
.DN
Compression of a vector by removal of zero elements.
.CS
compress.default(v, verbose=F)
.RA
.AG v
The vector to compress
.OA
.AG verbose
If this is true then report on compression activity.
.RT
An object of class "compressed" if compression took place, otherwise a
an object of class "uncompressed"
.DT
Images are large objects. Thresholded 2d wavelet objects (imwd) are also
large, but many of their elements are zero. Compress takes a vector,
decides whether compression is necessary and if it is makes an object
of class "compressed" containing the nonzero elements and their position
in the original vector.
The decision whether to compress the vector or not depends on two
things, first the number of non-zero elements in the vector (r, say),
and second the length of the vector (n, say). Since the position and value
of the non-zero elements is stored we will need to store 2r values
for the non-zero elements. So compression takes place if 2r < n.
.ME compress default
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SH BUGS
Sometimes the compressed object can be larger than the original.
This usually only happens for small objects, so it doesn't really
matter.
.SA
`compressed.object', `uncompressed.object'
.EX
#
# Compress a vector with lots of zeroes
#
> compress(c(rep(0,100),99))
$position:
[1] 101
$values:
[1] 99
$original.length:
[1] 101
attr(, "class"):
[1] "compressed"
#
# Try to compress a vector with not many zeroes
#
> compress(1:10)
$vector:
[1] 1 2 3 4 5 6 7 8 9 10
attr(, "class"):
[1] "uncompressed"
.KW manip
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'compress.imwd'" '(797 characters)'
if test -f 'compress.imwd'
then
echo shar: will not over-write existing file "'compress.imwd'"
else
cat << \SHAR_EOF > 'compress.imwd'
.BG
.FN compress.imwd
.TL
Use `compress()' on an `imwd' object
.DN
Compresses a (thresholded) imwd object by removing zero elements
.CS
compress.imwd(imwd, verbose=F)
.RA
.AG imwd
Object to compress. Compression only works on thresholded imwd objects.
.OA
.AG verbose
If this is true then report on compression activity
.RT
An object of type "imwdc" representing the compressed imwd object.
.DT
Thresholded imwd objects are usually very large and contain many zero
elements. This function compresses these objects into smaller "imwdc"
objects by using the "compress.default" function and removing the zeroes.
.ME compress imwd
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SA
`compress.default', `threshold', `imwdc.object'
.EX
#
# The user shouldn't need to use this function
#
.KW manip
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'compressed.object'" '(988 characters)'
if test -f 'compressed.object'
then
echo shar: will not over-write existing file "'compressed.object'"
else
cat << \SHAR_EOF > 'compressed.object'
.BG D
.FN compressed.object
.TL
Compressed object (wavelet)
.DN
These are objects of classes
.Cs
"compressed"
.Ce
They are lists with components representing a vector
.SH GENERATION
By compress.default
.SH METHODS
The `"compressed"' class of objects has methods for the following generic
functions:
.br
`uncompress.default'
.SH INHERITANCE
None (yet).
.PP
.SH STRUCTURE
The following components must be included in a legitimate `compressed' object.
.RC position
a vector containing the position of the non-zero elements in the original vector
.RC values
a vector, corresponding position-by-position to the position vector,
containing the values of the original vector
.RC original.length
the original length of the uncompressed vector
.DT
The compress.default function tries to compress a vector, if it can
it produces an object of type "compressed", otherwise it produces
an object of type "uncompressed".
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SA
`compress'.
.KW classes
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'dof'" '(625 characters)'
if test -f 'dof'
then
echo shar: will not over-write existing file "'dof'"
else
cat << \SHAR_EOF > 'dof'
.BG
.FN dof
.TL
Compute degrees of freedom (wavelet)
.DN
Compute degrees of freedom for wavelet thresholding model
.CS
dof(wd)
.RA
.AG wd
The wavelet object
.RT
The number of degrees of freedom.
.DT
The wavelet thresholding is a non-linear transform and dof returns
the approximate number of degrees of freedom.
.SH REFERENCES
Donoho, D and Johnstone, I.
.sp
Ideal Spatial Adaption by Wavelet Shrinkage.
.SA
`threshold'
.EX
#
# Do a wavelet decomposition of noisy data
#
> ywd <- wd(ynoise)
#
# Threshold
#
> threshy <- threshold(ywd)
#
# Compare degrees of freedom
#
> dof(ywd)
[1] 512
> dof(threshy)
[1] 11
.KW models
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'draw'" '(610 characters)'
if test -f 'draw'
then
echo shar: will not over-write existing file "'draw'"
else
cat << \SHAR_EOF > 'draw'
.BG
.FN draw
.TL
Draw - Generic wavelet function
.DN
Draw a picture of a wavelet
.GE
`wd',`imwd',`imwdc'
.CS
draw(x, ...)
.RA
.AG x
an S-PLUS object.
.OA
.AG ...
methods may have additional arguments.
.PP
.GR
.Tl
.SE
a plot is created on the current graphics device.
.SH REFERENCES
Daubechies, I. (1988)
.ul
Orthonormal bases of compactly supported wavelets
Communications on Pure and Applied Mathematics, Vol. 41, 909-996
.SA
`draw.default', `wd.object'
.EX
#
# Do a wavelet decomposition
#
> ywd <- wd(ynoise)
#
# Draw a picture of the wavelets associated with the decomposition
#
> draw(ywd)
.KW hplot
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'draw.default'" '(3912 characters)'
if test -f 'draw.default'
then
echo shar: will not over-write existing file "'draw.default'"
else
cat << \SHAR_EOF > 'draw.default'
.BG
.FN draw.default
.TL
Draw a picture of a wavelet
.DN
Draws pictures of wavelets associated with the wavethresh package
.CS
draw.default(filter.number=2, family="DaubExPhase",
resolution=1024, verbose=F, plot.it=T, main="Wavelet Picture",
sub=zwd$filter$name, xlab="x", ylab="psi", dimension=1,
twodplot=persp, enhance=T, efactor=0.05, ...)
.OA
.AG filter.number
The number of the filter in the wavelet family specified by filter to draw,
the range of numbers depends on the family.
.AG family
The family of wavelets to use, can be "DaubExPhase" or "DaubLeAsymm" for
Release 2.2.
.AG resolution
Specifies the resolution to use in drawing pictures. A higher resolution
gives a better quality picture but will take longer to draw (especially
in two-dimensions).
.AG verbose
Reports the progress of drawing
.AG plot.it
If this is T then an attempt is made to plot the appropriate wavelet
on the current active graphics device, otherwise the information
about the wavelet is returned.
.AG main
The main title
.AG sub
The subtitle
.AG xlab
A label for the x axis.
.AG ylab
A label for the y axis.
.AG dimension
If this is 1, then a one-dimensional version of the wavelet is drawn,
if this is 2, then a two-d picture is drawn with the twodplot function.
.AG twodplot
A function like persp that can do something interesting with a matrix!
.AG enhance
With Daubechies' wavelets their effective support is much less than
their actual support. If enhance=T then the function tries to draw
the wavelet on its effective support.
.AG efactor
This defines the effective support.
Define z0 to be efactor times the maximum absolute value of the wavelet w(x).
Define the set A={ x: |w(x)| > z0 }, and the effective support is the
smallest interval (1D), (square - 2D) containing A.
.AG ...
Other arguments you might like to supply to plot or twodplot.
.RT
If plot.it is false then no actual drawing is done, however, a list is
returned. If dim=1 the list has two components x and y, representing the
domain of definition (or effective support) of the wavelet and the
value of the wavelet at x. If dim=2, the list has three components
x,y and z.
.SE
a picture of a wavelet is drawn on the currently active graphics device
if plot.it=T.
.DT
For Daubechies' compactly supported wavelets there is no known closed form
expression. However, it is possible to make use of the inverse wavelet
transform to draw pictures of the wavelets. The idea is to set
all but one wavelet coefficient equal to zero and that one coefficient
equal to one in a wavelet expansion.
Then the inverse transform (reconstruction) is applied to the expansion
and a picture of one wavelet is produced.
A method similar to the one we present here is presented in Daubechies (1992)
in Section~6.5 on the cascade algorithm.
The principle is simple, but the implementation to get good pictures is
surprisingly tricky. Ideally you want to put in the non-zero coefficient
at the lowest resolution level as possible, this will give you as much
detail as possible. However, the support of some of the large-scale
wavelets is greater than the union of the supports of all the high-resolution
small-scale wavelets and so it may not be possible to draw a complete wavelet.
This function analyses the supports of the wavelets at different levels
and finds a coefficient to set to one by choosing the wavelet at the lowest
possible resolution and whose support is within the union of supports of
the highest resolution wavelets. If there is
a choice of such wavelets, then the middle-most one is selected.
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SH REFERENCES
Daubechies, I. (1992)
.ul
Ten Lectures on Wavelets
CBMS-NSF Regional Conference Series in Applied Mathematics
.SA
`draw'
.EX
#
# Draw a 2-dimensional Daubechies least asymmetric wavelet
#
> draw.default(filter.number=6, family="DaubLeAsymm", dimension=2,
resolution=128)
.KW hplot
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'draw.imwd'" '(794 characters)'
if test -f 'draw.imwd'
then
echo shar: will not over-write existing file "'draw.imwd'"
else
cat << \SHAR_EOF > 'draw.imwd'
.BG
.FN draw.imwd
.TL
Use `draw()' on an `imwd' object
.DN
Draws picture of wavelet associated with 2-d wavelet decomposition object.
.CS
draw.imwd(wd, ...)
.RA
.AG wd
Object of class "imwd", maybe from a wavelet decomposition using
the imwd function.
.OA
.AG ...
Other arguments to draw.default
.SE
Causes a picture of a wavelet associated with the imwd object to be
drawn on the current graphics device.
.DT
Sometimes it is more convenient to use this function rather than
draw.default, since you want a picture of the wavelet that did your
decomposition.
.ME draw imwd
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SA
`draw.default', `imwd'
.EX
#
# Do wavelet decomposition on image: piccy
#
> pwd <- imwd(piccy)
#
# Draw a picture of the wavelets that were used
#
> draw(pwd)
.KW hplot
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'draw.imwdc'" '(832 characters)'
if test -f 'draw.imwdc'
then
echo shar: will not over-write existing file "'draw.imwdc'"
else
cat << \SHAR_EOF > 'draw.imwdc'
.BG
.FN draw.imwdc
.TL
Use `draw()' on an `imwdc' object
.DN
Draws picture of wavelet associated with 2-d wavelet decomposition object.
.CS
draw.imwdc(wd, ...)
.RA
.AG wd
Object of class "imwdc", maybe from a thresholding using
the threshold function.
.OA
.AG ...
Other arguments to draw.default
.SE
Causes a picture of a wavelet associated with the imwdc object to be
drawn on the current graphics device.
.DT
Sometimes it is more convenient to use this function rather than
draw.default, since you want a picture of the wavelet that did your
decomposition.
.ME draw imwdc
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SA
`draw.default', `imwdc'
.EX
#
# Do wavelet decomposition on image: piccy
# and then thresholding
#
> pwd <- threshold(imwd(piccy))
#
# Draw a picture of the wavelets that were used
#
> draw(pwd)
.KW hplot
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'draw.wd'" '(774 characters)'
if test -f 'draw.wd'
then
echo shar: will not over-write existing file "'draw.wd'"
else
cat << \SHAR_EOF > 'draw.wd'
.BG
.FN draw.wd
.TL
Use `draw()' on a `wd' object
.DN
Draws picture of wavelet associated with wavelet decomposition object.
.CS
draw.wd(wd, ...)
.RA
.AG wd
Object of class "wd", maybe from a wavelet decomposition using
the wd function.
.OA
.AG ...
Other arguments to draw.default
.SE
Causes a picture of a wavelet associated with the wd object to be
drawn on the current graphics device.
.DT
Sometimes it is more convenient to use this function rather than
draw.default, since you want a picture of the wavelet that did your
decomposition.
.ME draw wd
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SA
`draw.default', `wd'
.EX
#
# Do wavelet decomposition on vector: ynoise
#
> ywd <- wd(ynoise)
#
# Draw a picture of the wavelets that were used
#
> draw(ywd)
.KW hplot
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'filter.select'" '(2485 characters)'
if test -f 'filter.select'
then
echo shar: will not over-write existing file "'filter.select'"
else
cat << \SHAR_EOF > 'filter.select'
.BG
.FN filter.select
.TL
Provide wavelet filter coefficients.
.DN
This function stores the filter coefficients necessary for doing a discrete
wavelet transform (and its inverse).
.CS
filter.select(filter.number, family="DaubExPhase", constant=1)
.RA
.AG filter.number
This selects the desired filter, an integer that takes a value dependent
upon the family that you select.
.OA
.AG family
This selects the type of wavelet.
.AG constant
This constant is applied as a multiplier to all the coefficients.
It can be a vector, and so you can adapt the filter coefficients
to be whatever you want. (This is feature of negative utility).
.RT
A list is returned with four components describing the filter:
.RC H
A vector containing the filter coefficients.
.RC name
A character string containing the name of the filter.
.RC family
A character string containing the family of the filter.
.RC filter.number
The filter number used to select the filter.
.DT
This function contains three types of filter.
Two types can be selected with family set to DaubExPhase, these
wavelets are the Haar wavelet (selected by filter.number=1 within
this family) and Daubechies ``extremal phase'' wavelets (selected by
filter.numbers ranging from 2 to 10) as proposed in Daubechies (1988).
Setting family to DaubLeAsymm gives you Daubechies least asymmetric
wavelets also from Daubechies (1988), but here the filter number ranges
from 4 to 10.
For Daubechies wavelets, filter.number corresponds to the
N of that paper, the wavelets become more regular as the filter.number
increases, but they are all of compact support.
.SH NOTE
The filter coefficients should always sum to sqrt(2). This is
a useful check on their validity.
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SH REFERENCES
Any book on wavelets, especially
.sp
Chui, C. K. (1992)
.ul
An Introduction to Wavelets.
Academic Press, London.
.sp
Daubechies, I. (1988)
.ul
Orthonormal bases of compactly supported wavelets
Communications on Pure and Applied Mathematics, Vol. 41, 909-996
.SA
`wd', `wr', `plot.coefs', `accessC', `accessD', `imwd', `imwr',
`threshold', `draw'
.EX
This function is currently used by `wr' and `wd' in decomposing and
reconstructing, however you may wish to look at the coefficients.
#
# look at the filter coefficients for N=2
#
> filter.select(2)
$H:
[1] 0.4829629 0.8365163 0.2241439 -0.1294095
$name:
[1] "Daub cmpct on ext. phase N=2"
$family:
[1] "DaubExPhase"
$filter.number:
[1] 2
.KW math
.KW smooth
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'first.last'" '(3843 characters)'
if test -f 'first.last'
then
echo shar: will not over-write existing file "'first.last'"
else
cat << \SHAR_EOF > 'first.last'
.BG
.FN first.last
.TL
Build a first/last database for wavelet transforms.
.DN
This function is not intended for user use, but is used by various
functions involved in computing and displaying wavelet transforms.
.CS
first.last(LengthH, DataLength, bc="periodic")
.RA
.AG LengthH
Length of the filter used to produce a wavelet decomposition.
.AG DataLength
Length of the data before transforming. This must be a power of 2,
say 2^m.
.OA
.AG bc
This argument determines how the boundaries of the function are to
be handled. The permitted values are periodic or symmetric.
.RT
A first/last database structure, a list containing the
following information:
.RC first.last.c
A (m+1)x3 matrix. The first column specifies the real index of the first
coefficient of the smoothed data at a level, the 2nd column is the
real index of the last coefficient, the last column specifies the offset
of the first smoothed datum at that level. The offset is used by the
C code to work out where the beginning of the sequence is within a packed
vector of the pyramid structure. The first and 2nd columns can be used
to work out how many numbers there are at a level. If bc="periodic" then
the pyramid is a true power of 2 pyramid, that is it starts with a
power of 2, and the next level is half of the previous.
If bc="symmetric" then the pyramid is nearly exactly a power of 2, but
not quite, see the Details section for why this is so.
.RC ntotal
The total number of smoothed data/original data points.
.RC first.last.d
A mx3 matrix. As for first.last.c but for the wavelet coefficients packed
as the D component of a wavelet structure.
.RC ntotal.d
The total number of wavelet coefficients.
.DT
Suppose you begin with 2^m=2048 coefficients. At the next level you
would expect 1024 smoothed data coefficients, and 1024 wavelet coefficients,
and if bc="periodic" this is indeed what happens.
However, if bc="symmetric"
you actually need more than 1024 (as
the wavelets extend over the edges). The first last database keeps track
of where all these "extras" appear and also where they are located in
the packed vectors C and D of pyramidal coefficients within wavelet
structures.
For example, given a first.last.c row of
-2 3 20
The actual coefficients would be
c_{-2}, c_{-1}, c_{0}, c_{1}, c_{2}, c_{3}
(in LaTeX notation where _{x} denotes subscript x). In other words, there
are 6 coefficients, starting at -2 and ending at 3, and the first of these
(c_{-2}) appears at an offset of 20 from the beginning of the $C
component vector of the wavelet structure.
You can ``do'' first.last in your head for periodic boundary handling
but for more general boundary treatments (e.g. symmetric) first.last
is indispensable.
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SH REFERENCES
The numbers in first last databases were worked out from inequalities
derived from:
Daubechies, I. (1988)
.ul
Orthonormal bases of compactly supported wavelets
Communications on Pure and Applied Mathematics, Vol. 41, 909-996
.SA
`wr', `accessC', `accessD', `filter.select', `plot.coefs', `dyn.load'
`threshold', `wd', `imwd', `imwr'
.SH BUGS
None, I hope. However, with hindsight, I should have implemented the
periodic version first. The symmetric boundary stuff confused a lot
of people (including me)!
.EX
#
#If you're twisted then you may just want to look at one of these.
#
> first.last(length(filter.select(2)), 64)
$first.last.c:
First Last Offset
[1,] 0 0 126
[2,] 0 1 124
[3,] 0 3 120
[4,] 0 7 112
[5,] 0 15 96
[6,] 0 31 64
[7,] 0 63 0
$ntotal:
[1] 127
$first.last.d:
First Last Offset
[1,] 0 0 62
[2,] 0 1 60
[3,] 0 3 56
[4,] 0 7 48
[5,] 0 15 32
[6,] 0 31 0
$ntotal.d:
[1] 63
.KW misc
.KW utilities
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'imwd'" '(2695 characters)'
if test -f 'imwd'
then
echo shar: will not over-write existing file "'imwd'"
else
cat << \SHAR_EOF > 'imwd'
.BG
.FN imwd
.TL
Discrete wavelet transform for images (decomposition)
.DN
This function performs the decomposition stage of Mallat's pyramid algorithm
i.e. the discrete wavelet transform for images.
.CS
imwd(image, filter.number=2, bc="periodic", verbose=F)
.RA
.AG image
Square matrix containing the image. The number of rows in the image
must be a power of 2. Since the matrix is square, this is also the number
of columns in the matrix.
.OA
.AG filter.number
The filter that you wish to use to decompose the function. The filters
are obtained from the "filter.select" function and are the compactly
supported orthonormal wavelets as described in Daubechies, I.
.AG bc
boundary treatment. The periodic (default) treatment causes the decomposition
to act as if the function you are trying to decompose is periodic (on its
own interval). The other option is symmetric, which used to be the
default. This causes the decomposition to act as if the function were extended
by symmetric reflection at each end.
.AG verbose
If this argument is true then informative messages are printed
whilst the computations are performed.
.RT
An object of class "imwd", a list containing the wavelet coefficients
(see "imwd.object").
.SE
None
.DT
The 2D algorithm is essentially the application of many 1D filters.
First, the columns are attacked with the smoothing (H) and bandpass
(G) filters, and the rows of each of these resultant images are
attacked again with each of G and H, this results in 4 images.
Three of them, GG, GH, and HG correspond to the highest resolution
wavelet coefficients. The HH image is a smoothed version of the
original and can be further attacked in exactly the same way as the
original image to obtain GG(HH), GH(HH), and HG(HH), the wavelet
coefficients at the second highest resolution level and HH(HH)
the twice-smoothed image, which then goes on to be further attacked.
After each attack the dimension of the images is halved.
After many attacks
you will obtain four real numbers, one of which corresponds to the
image smoothed many times.
Exact details of the algorithm are to be found in
Mallat 1989.
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SH REFERENCES
.sp
Daubechies, I. (1988)
.ul
Orthonormal bases of compactly supported wavelets
Communications on Pure and Applied Mathematics, Vol. 41, 909-996
.sp
Mallat, S. G. (1989)
.ul
A theory for multiresolution signal decomposition: the wavelet representation
IEEE Transactions on Pattern Analysis and Machine Intelligence.
Vol. 11, Number 7 674-693.
.SA
`imwr', `plot', `draw'
.EX
#
# Do a decomposition of an image
#
tdecomp <- imwd(test.image)
#
# Look at the coefficients
#
plot(tdecomp)
.KW smooth
.KW nonlinear
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'imwd.object'" '(2616 characters)'
if test -f 'imwd.object'
then
echo shar: will not over-write existing file "'imwd.object'"
else
cat << \SHAR_EOF > 'imwd.object'
.BG D
.FN imwd.object
.TL
Wavelet decomposition object (2D).
.DN
These are objects of classes
.Cs
"imwd"
.Ce
They represent a decomposition of a 2D function with respect to
a 2D wavelet basis.
.SH GENERATION
This class of objects is returned from the `imwd' function
to represent a wavelet decomposition of an image (or other 2D function).
Other functions also return an `imwd.object'.
.SH METHODS
The `"imwd"' class of objects has methods for the following generic
functions:
.br
`plot', `threshold', `summary', `print', `draw', `imwr', `compress'.
.SH INHERITANCE
None (yet).
.PP
.SH STRUCTURE
The following components must be included in a legitimate `imwd' object.
.RC nlevels
number of levels in wavelet decomposition. If you raise 2 to the power
of nlevels then you get the dimension of the image that you originally
started with.
.RC fl.dbase
The first last database associated with the decomposition. For images,
this list is not very useful as each level's components are stored as
a list component, rather than being packaged up in a single vector
as in the 1D case. Nevertheless the internals still need to know
about fl.dbase to get the computations correct.
.RC filter
A filter object as returned by "filter.select". This component records
the filter used in the decomposition. The reconstruction routines
use this component to find out what filter to use in reconstruction.
.RC wNLx
The object will probably contain many components with names of
this form. These are all the wavelet coefficients of the decomposition.
In "wNLx" the "N" refers to the level number and the "x" refers to
the direction of the coefficients with "1" being horizontal,
"2" being vertical and "3" being diagonal. Note that the levels
should be in numerically decreasing order, so if nlevels is 5, then
there will be w5L1, w5L2, w5L3 first, then down to w1L1, w1L2, and
w1L3. Note that these coefficients store their data according to
the first.last database "fl.dbase$first.last.d", so refer to
them using this.
.RC w0Lconstant
This is the coefficient of the bottom level scaling function coefficient.
So for example, if you used Haar wavelets this would be the sample
mean of the data (scaled by some factor depending on the number
of levels, nlevels).
.RC bc
This component details how the boundaries were treated in the decomposition.
.DT
In previous releases the original image was stored as the "original"
component of an imwd object. This is not done now as the resulting
objects were excessively large.
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SA
`imwd', `compress', `draw'.
.KW classes
.KW methods
.KW smooth
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'imwdc.object'" '(1870 characters)'
if test -f 'imwdc.object'
then
echo shar: will not over-write existing file "'imwdc.object'"
else
cat << \SHAR_EOF > 'imwdc.object'
.BG D
.FN imwdc.object
.TL
Compressed wavelet decomposition object (2D).
.DN
These are objects of class
.Cs
"imwdc"
.Ce
They represent a compressed decomposition of a 2D function with respect to
a 2D wavelet basis.
.SH GENERATION
This class of objects is returned from the `threshold' and
"compress" functions
to represent a wavelet decomposition of an image (or other 2D function).
.SH METHODS
The `"imwdc"' class of objects has methods for the following generic
functions:
.br
`plot', `threshold', `summary', `print', `draw', `imwr', `uncompress'.
.SH INHERITANCE
None (yet).
.PP
.SH STRUCTURE
The following components must be included in a legitimate `imwdc' object.
.RC nlevels
number of levels in wavelet decomposition. If you raise 2 to the power
of nlevels then you get the dimension of the image that you originally
started with.
.RC fl.dbase
The first last database associated with the decomposition. For images,
this list is not very useful as each level's components are stored as
a list component, rather than being packaged up in a single vector
as in the 1D case. Nevertheless the internals still need to know
about fl.dbase to get the computations correct.
.RC filter
A filter object as returned by "filter.select". This component records
the filter used in the decomposition. The reconstruction routines
use this component to find out what filter to use in reconstruction.
.RC wNLx
These are compressed versions of the equivalent imwd components
.RC w0Lconstant
These are compressed versions of the equivalent imwd components
.RC bc
This component details how the boundaries were treated in the decomposition.
.DT
The imwdc object is the same as an imwd object, except that the wNLx
components have been compressed using `compress.default'.
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SA
`imwd', `uncompress', `draw'.
.KW classes
.KW methods
.KW smooth
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'imwr'" '(329 characters)'
if test -f 'imwr'
then
echo shar: will not over-write existing file "'imwr'"
else
cat << \SHAR_EOF > 'imwr'
.BG
.FN imwr
.TL
imwr - Generic wavelet function
.DN
Does a 2-dimensional wavelet reconstruction
.GE
imwd, imwdc
.CS
imwr(x, ...)
.RA
.AG x
an S-PLUS object.
.OA
.AG ...
methods may have additional arguments.
.PP
See entry for imwr.imwd
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SA
imwr.imwd
.KW nonlinear
.KW smooth
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'imwr.imwd'" '(1857 characters)'
if test -f 'imwr.imwd'
then
echo shar: will not over-write existing file "'imwr.imwd'"
else
cat << \SHAR_EOF > 'imwr.imwd'
.BG
.FN imwr.imwd
.TL
Discrete wavelet transform for images (reconstruction)
.DN
This function performs the reconstruction stage of Mallat's
pyramid algorithm, i.e. the inverse discrete wavelet transform
for images.
.CS
imwr.imwd(imwd, bc=imwd$bc, verbose=F)
.RA
.AG imwd
An object of class `imwd'. This type of object is returned by
`imwd'.
.OA
.AG bc
This argument specifies the boundary handling, it is best left to be
the boundary handling specified by that in the supplied imwd.
.AG verbose
If this argument is true then informative messages are printed
detailing the computations to be performed
.RT
A matrix, of dimension determined by the original data set supplied
to the initial decomposition (more precisely, determined by the nlevels
component of the imwd.object). This matrix is the highest resolution
level of the reconstruction. If an `imwd' (decomposition) is followed
immediately by an `imwr' (reconstruction) then the returned matrix
will be exactly the same as the original image.
.SE
None
.DT
Details of the algorithm are to be found in Mallat (1989).
As for "imwd" the algorithm works by applying many 1D reconstruction
algorithms to the coefficients. The filters used are those
described in Daubechies, I (1988).
.ME imwr imwd
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SH REFERENCES
.sp
Daubechies, I. (1988)
.ul
Orthonormal bases of compactly supported wavelets
Communications on Pure and Applied Mathematics, Vol. 41, 909-996
.sp
Mallat, S. G. (1989)
.ul
A theory for multiresolution signal decomposition: the wavelet representation
IEEE Transactions on Pattern Analysis and Machine Intelligence.
Vol. 11, Number 7 674-693.
.SA
`imwd', `plot', `threshold'
.EX
#
# Do a decomposition, then exact reconstruction
# Look at the error
#
max( abs(imwr(imwd(test.image)) - test.image))
[1] 1.014611e-11
.KW nonlinear
.KW smooth
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'imwr.imwdc'" '(668 characters)'
if test -f 'imwr.imwdc'
then
echo shar: will not over-write existing file "'imwr.imwdc'"
else
cat << \SHAR_EOF > 'imwr.imwdc'
.BG
.FN imwr.imwdc
.TL
Use `imwr()' on an `imwdc' object
.DN
Does a 2-dimensional wavelet reconstruction of a compressed imwdc object
.CS
imwr.imwdc(wd, ...)
.RA
.AG wd
Object of class "imwdc", maybe from a thresholding using
the threshold function.
.OA
.AG ...
Other arguments to imwr.imwd
.DT
Imwr is a generic function to do the inverse 2-dimensional wavelet transform.
.ME imwr imwdc
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SA
`imwr.imwd', `imwr'
.EX
#
# Decompose an image and then threshold
#
> tdi <- threshold(imwd(myimage))
#
# Now reconstruct, imwr should use imwr.imwdc directly after
# thresholding.
#
> answer <- imwr(tdi)
.KW nonlinear
.KW smooth
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'lt.to.name'" '(373 characters)'
if test -f 'lt.to.name'
then
echo shar: will not over-write existing file "'lt.to.name'"
else
cat << \SHAR_EOF > 'lt.to.name'
.BG
.FN lt.to.name
.TL
Convert notations (wavelet)
.DN
Convert level/notation into imwd.object component names
.CS
lt.to.name(level, type)
.RA
.AG level
The level of the decomposition
.AG type
The type, one of CD, DC or DD, reflecting the application of filters
g,h in combination.
.RT
A string in the imwd.object wavelet coefficient name format.
.DT
Not for user use!
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'maybe.load'" '(311 characters)'
if test -f 'maybe.load'
then
echo shar: will not over-write existing file "'maybe.load'"
else
cat << \SHAR_EOF > 'maybe.load'
.BG
.FN maybe.load
.TL
Load wavethresh object code if it's not loaded (wavelet).
.DN
Load wavethresh object code if it's not loaded.
.CS
maybe.load()
.RT
None
.SE
If the symbol "wavedecomp" is not loaded, then load the wavethresh object
code.
.DT
Contributed in release 2.2 by Martin Maechler.
.SA
dyn.load
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'plot.imwd'" '(4813 characters)'
if test -f 'plot.imwd'
then
echo shar: will not over-write existing file "'plot.imwd'"
else
cat << \SHAR_EOF > 'plot.imwd'
.BG
.FN plot.imwd
.TL
Use `plot()' on an `imwd' object
.DN
Images wavelet coefficients of an imwd class object
.CS
plot(imwd, scaling="by.level", co.type="abs", package="SPlus",
plot.type="mallat", arrangement=c(3,3), transform=F,
tfunction=sqrt)
.RA
.AG imwd
Object of class "imwd". This contains a wavelet decomposition of an
image.
.OA
.AG scaling
This determines the scaling applied to each sub-image. The values can be
"by.level" which means each image is scaled independently to the whole
dynamic range of the image, otherwise the whole image is scaled as a whole
(as in previous versions). This argument only takes effect when the
plot.type=="mallat".
.AG co.type
This argument specifies a transform to be applied to the coefficients
before display. If co.type=="abs", then the absolute values of the coefficients
will be displayed, if co.type=="mabs", then the negative of the absolute values
will be displayed. These two arguments ensure that large coefficients,
whether positive or negative, will look different to small coefficients.
If co.type=="none", then no transforming will take place (as in previous
releases)
.AG package
The version of S that you are using. The allowed values are "S" or "SPlus".
This argument controls the return value of the function. Since S does not
have the "image" command the coefficient matrix is returned instead of
imaged to a device.
.AG plot.type
If plot.type=="mallat", then the image within image type of plot as
exemplified within Mallat (1989) will be produced. If plot.type=="rows",
then the individual level/orientation plots are produced in an array
according to the arrangement argument.
.AG arrangement
Determines the parameter to pass to mfrow to set up the display for
multiple plots per page. The default, c(3,3) specifies 3 rows and 3 columns.
.AG transform
If this argument is T then the function defined by tfunction is applied to
the coefficients.
.AG tfunction
A transform to apply to the coefficients if transform==T.
.RT
If package is "S" then the matrix of wavelet coefficients packed in
Mallat form is returned.
.SE
An image of the wavelet coefficients is produced if package=SPlus.
.DT
If plot.type=="mallat" then
the picture produced is the same as the one described by
Mallat 1989. After a decomposition there will be exactly the same
number of coefficients as there were pixels in the original image.
This function is a good way of plotting all the coefficients, but it means
that you can't really see some of the lower resolution images very clearly.
If plot.type=="rows", then each sub-image of the decomposition is plotted
at the same size so you can see the lower resolution images. By default
the arrangement of each of the sub-images is on a 3x3 array, so three levels
can fit onto one plot. If you are using a screen device then it may be
desirable to turn on the "ask before you plot" option with dev.ask(),
since the coefficients plot may run to a few pages with the "rows"
plot.type.
It is not always easy to see exactly what's going on, so there are
a few arguments that try to manipulate the (sub)image(s). The scaling
argument only works when the plot.type=="mallat" is in force. If scaling
is by.level then each subimage is scaled independently, if not then
the whole image is scaled at once. The co.type works for both plot
types and causes absolute values, negative absolute values or just
the coefficients to be plotted - this is useful for enhancing various
effects. The most flexible transformation is provided when transform==T,
then the function tfunction is applied to all the coefficients and can
produce some useful contrast enhancing effects.
At each stage of the decomposition we decompose an image into
four images, each of which is half the size of the original
(see "imwd" for what these are). Three of the images correspond
to wavelet coefficients in the horizontal, vertical and diagonal
directions and these form the bottom-right, top-left and
top-right sections of the displayed image. The fourth image
is further decomposed into another four subimages. Three of which
get put into the bottom-right, top-left, and
top-right sections OF THE BOTTOM-LEFT part of the previous image
and so on and so on. It is impossible to explain this properly
in a simple fashion, so the only way to really understand what
is going on is to do it yourself and see what happens.
.ME plot imwd
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SH REFERENCES
.sp
Mallat, S. G. (1989)
.ul
A theory for multiresolution signal decomposition: the wavelet representation
IEEE Transactions on Pattern Analysis and Machine Intelligence.
Vol. 11, Number 7 674-693.
.SA
`imwd'
.EX
#
# Do an image decomposition
#
tdecomp <- imwd(test.image)
#
# Plot wavelet coefficients
#
plot(tdecomp)
.KW hplot
.KW smooth
.KW nonlinear
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'plot.imwdc'" '(580 characters)'
if test -f 'plot.imwdc'
then
echo shar: will not over-write existing file "'plot.imwdc'"
else
cat << \SHAR_EOF > 'plot.imwdc'
.BG
.FN plot.imwdc
.TL
Use `plot()' on an `imwdc' object
.DN
Images wavelet coefficients of an imwdc class object
.CS
plot.imwdc(wd, ...)
.RA
.AG wd
Object of class "imwdc", maybe from a thresholding using
the threshold function.
.OA
.AG ...
Other arguments to plot.imwd
.PP
.SE
A plot of image coefficients is performed
.ME plot imwdc
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SA
`plot.imwd', `plot'
.EX
#
# Do the wavelet transform of an image
# and then threshold
#
> tdi <- threshold(imwd(myimage))
#
# Now plot the coefficients, uses plot.imwdc
#
> plot(tdi)
.KW hplot
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'plot.wd'" '(3012 characters)'
if test -f 'plot.wd'
then
echo shar: will not over-write existing file "'plot.wd'"
else
cat << \SHAR_EOF > 'plot.wd'
.BG
.FN plot.wd
.TL
Use `plot()' on a `wd' object
.DN
Plots wavelet coefficients of a wd class object.
.CS
plot(wd, xlabels, first.level = 1, main =
"Wavelet Decomposition Coefficients",
scaling="by.level", rhlab=F, sub, ...)
.RA
.AG wd
Object of class "wd". This contains a wavelet decomposition of a function.
.OA
.AG xlabels
If supplied, this argument should be a vector containing the x-axis for
the plot. For example, if you are trying to regress y on x, then you
might want to put "x" in as the x-axis. Otherwise, the translates will
be plotted.
.AG first.level
This determines how many of the low resolution levels are plotted.
The default is for first.level=1, which means that 1 coefficient
is plotted.
.AG main
Title of plot
.AG scaling
The type of scaling applied to levels within the plot. This
can be "by.level" or "global".
.AG rhlab
Determines whether the scale factors applied to each level before plotting
are printed as the right hand axis.
.AG sub
The subtitle of the plot. If this is missing then something useful gets
substituted.
.AG ...
other arguments to be supplied to plot.
.RT
Axis labels to the right of the picture. These values are the maximum
of the absolute value of the coefficients at that resolution level.
They are returned because they are sometimes hard to read on the plot.
.SE
A plot of the wavelet coefficients at each resolution level is produced.
.DT
The picture produced is similar to those in Donoho and Johnstone
1992. Wavelet coefficients for each resolution level are plotted
one above the other, with the high resolution coefficients at the
bottom, and the low resolution at the top. The coefficients are
plotted using the "segment" function, with a large positive coefficient
being plotted above an imaginary horizontal centre line, and a large
negative coefficient plotted below it. The position of a coefficient
along a line is indicative of the wavelet basis function's translate
number.
The resolution levels are labelled on the left-hand side axis,
and the maximum values of the absolute values of the coefficients
for the particular level form the right-hand side axis.
The levels of coefficients can be scaled in two ways. If you are
not interested in comparing the relative scales of coefficients
from different levels, then the default "scaling" option, "by.level"
is what you need. This computes the maximum of the absolute value
of the coefficients at a particular level and scales them so that
they fit nicely onto the plot. For this option, each level is scaled
DIFFERENTLY. To obtain a uniform scale for all the levels specify
the "global" option to the "scaling" argument. This will allow you
to make inter-level comparisons.
.ME plot wd
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SH REFERENCES
.sp
Donoho, D.L. and Johnstone, I.M.
.ul
Ideal Spatial Adaptation by Wavelet Shrinkage
.SA
`wd'
.EX
#
# Do a decomposition
#
tdecomp <- wd(test.data)
#
# Plot wavelet coefficients
#
plot(tdecomp)
.KW hplot
.KW smooth
.KW nonlinear
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'print.imwd'" '(378 characters)'
if test -f 'print.imwd'
then
echo shar: will not over-write existing file "'print.imwd'"
else
cat << \SHAR_EOF > 'print.imwd'
.BG
.FN print.imwd
.TL
Use `print()' on an `imwd' object
.DN
Prints out information about an imwd object and then calls `summary()'
on it.
.CS
print.imwd(x)
.RA
.AG x
an object of class `imwd'
.DT
Contributed in release 2.2 by Martin Maechler
.SE
Information about the `imwd' object is printed out.
.SA
`imwd', `imwd.object'.
.EX
# You don't really want an example, do you?
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'print.imwdc'" '(386 characters)'
if test -f 'print.imwdc'
then
echo shar: will not over-write existing file "'print.imwdc'"
else
cat << \SHAR_EOF > 'print.imwdc'
.BG
.FN print.imwdc
.TL
Use `print()' on an `imwdc' object
.DN
Prints out information about an imwdc object and then calls `summary()'
on it.
.CS
print.imwdc(x)
.RA
.AG x
an object of class `imwdc'
.DT
Contributed in release 2.2 by Martin Maechler
.SE
Information about the `imwdc' object is printed out.
.SA
`imwdc', `imwdc.object'.
.EX
# You don't really want an example, do you?
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'print.wd'" '(362 characters)'
if test -f 'print.wd'
then
echo shar: will not over-write existing file "'print.wd'"
else
cat << \SHAR_EOF > 'print.wd'
.BG
.FN print.wd
.TL
Use `print()' on a `wd' object
.DN
Prints out information about a wd object and then calls `summary()'
on it.
.CS
print.wd(x)
.RA
.AG x
an object of class `wd'
.DT
Contributed in release 2.2 by Martin Maechler
.SE
Information about the `wd' object is printed out.
.SA
`wd', `wd.object'.
.EX
# You don't really want an example, do you?
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'putC'" '(1607 characters)'
if test -f 'putC'
then
echo shar: will not over-write existing file "'putC'"
else
cat << \SHAR_EOF > 'putC'
.BG
.FN putC
.TL
Put smoothed data into wavelet structure
.DN
Makes a copy of the wd object, replaces some smoothed data in
the copy, and then returns the copy.
.CS
putC(wd, level, v, boundary=F)
.RA
.AG wd
wd class object that is to be copied and have smoothed data replaced
.AG level
The level at which the replacement is to take place
.AG v
The replacement data, this should be of the correct length.
.OA
.AG boundary
If boundary is F then only the "real" data is replaced (and it is
easy to predict the required length of v). If boundary is T then
you can replace the boundary values at a particular level as well
(but it is hard to predict the required length of v, and the information
has to be obtained from the first.last database component of wd).
This argument has no meaning if the wd object was obtained with the bc
periodic boundary handling method.
.RT
A wd class object containing the replaced data.
.SE
None
.DT
The function "accessC" obtains the smoothed data for a particular
level. The function "putC" replaces data at a particular level and
returns a modified wd object reflecting the change.
This function is probably not particularly useful, but it is present
for completeness. It is required because of the pyramidal nature of
the smoothed data points being packed into a vector.
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SA
`wd.object', `accessC'
.EX
#
# Put the numbers 1:64 into level 6
#
> new <- putC(old, level=6, v=1:64, boundary=F)
#
# If you look at the C component of new, you will see that
# some numbers have changed at the appropriate position.
#
.KW manip
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'putD'" '(1608 characters)'
if test -f 'putD'
then
echo shar: will not over-write existing file "'putD'"
else
cat << \SHAR_EOF > 'putD'
.BG
.FN putD
.TL
Put wavelet coefficients into wavelet structure
.DN
Makes a copy of the wd object, replaces some wavelet coefficients
into the copy, and then returns the copy.
.CS
putD(wd, level, v, boundary=F)
.RA
.AG wd
wd class object that is to be copied, have wavelet coefficients replaced
and then returned.
.AG level
The level at which replacement is to take place.
.AG v
The replacement coefficients, this should be of the correct length.
.OA
.AG boundary
If boundary is F then only the "real" coefficients are replaced
(and it is easy to predict the required length of the v, just the
correct power of 2). If boundary=T then you can replace the boundary
coefficients as well (but it is hard to predict the required length
of v, and the information has to be extracted from the first.last
database component of wd).
This argument has no meaning if the wd object was obtained using periodic
boundary handling.
.RT
A wd class object containing the replaced data.
.SE
None
.DT
The function "accessD" obtains the wavelet coefficients for a particular
level. The function "putD" replaces coefficients at a particular
level and returns a modified wd object reflecting the change.
As opposed to the utility of "putC", the "putD" function is actually
quite useful. It is fun to replace coefficients, since then you
can dream up your own functions, get pictures of the wavelets etc. etc.
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SA
`wd.object', `accessD', `draw'
.EX
#
# Set all the wavelet coefficients to zero
#
> for(i in 0:(wd$nlevels-1))
wd <- putD(wd, level=i, v=rep(0,2^i))
.KW manip
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'summary.imwd'" '(674 characters)'
if test -f 'summary.imwd'
then
echo shar: will not over-write existing file "'summary.imwd'"
else
cat << \SHAR_EOF > 'summary.imwd'
.BG
.FN summary.imwd
.TL
Use `summary()' on an `imwd' object
.CS
summary.imwd(object)
.AG object
The image wavelet decomposition of class "imwd".
This is assumed to be the result of some image wavelet decomposition.
.RT
Nothing
.DT
The number of levels, the dimension of the original image,
name of the filter and the type of boundary handling are printed.
.ME summary imwd
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SA
`summary', `imwd.object'.
.EX
> summary(lwd)
UNcompressed image wavelet decomposition structure
Levels: 8
Original image was 256 x 256 pixels.
Filter was: Daub cmpct on ext. phase N=2
Boundary handling: periodic
.KW nonlinear
.KW smooth
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'summary.imwdc'" '(695 characters)'
if test -f 'summary.imwdc'
then
echo shar: will not over-write existing file "'summary.imwdc'"
else
cat << \SHAR_EOF > 'summary.imwdc'
.BG
.FN summary.imwdc
.TL
Use `summary()' on an `imwdc' object
.CS
summary.imwdc(object)
.AG object
The image wavelet decomposition of class "imwdc".
This is assumed to be the result of some image wavelet decomposition
and thresholded.
.RT
Nothing
.DT
The number of levels, the dimension of the original image,
name of the filter and the type of boundary handling are printed.
.ME summary imwdc
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SA
`summary', `imwdc.object'.
.EX
> summary(lwdt)
Compressed image wavelet decomposition structure
Levels: 8
Original image was 256 x 256 pixels.
Filter was: Daub cmpct on ext. phase N=2
Boundary handling: periodic
.KW nonlinear
.KW smooth
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'summary.wd'" '(588 characters)'
if test -f 'summary.wd'
then
echo shar: will not over-write existing file "'summary.wd'"
else
cat << \SHAR_EOF > 'summary.wd'
.BG
.FN summary.wd
.TL
Use `summary()' on a `wd' object
.CS
summary.wd(object)
.AG object
The wavelet decomposition of class "wd".
This is assumed to be the result of some wavelet decomposition.
.RT
Nothing
.DT
The number of levels, the length of the original data sequence,
name of the filter and type of boundary handling are printed.
.ME summary wd
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SA
`summary', `wd.object'.
.EX
> summary(tmp)
Levels: 9
Length of original: 512
Filter was: Daub cmpct on ext. phase N=2
Boundary handling: periodic
.KW nonlinear
.KW smooth
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'support'" '(1645 characters)'
if test -f 'support'
then
echo shar: will not over-write existing file "'support'"
else
cat << \SHAR_EOF > 'support'
.BG
.FN support
.TL
Compute support of wavelet
.DN
Computes the support of a wavelet
.CS
support(filter.number=2, family="DaubExPhase", m=0, n=0)
.OA
.AG filter.number
The number within the wavelet family whose support you wish to compute
.AG family
The family of wavelets that you wish to use
.AG m
The dilation number
.AG n
The translation number
.RT
A list containing the support of the wavelets. The list contains
the following components:
.RC lh
The left-hand end of the interval of the support of the wavelet.
.RC rh
The right-hand end
.RC psi.lh
The left-hand end of the support of the mother wavelet
.RC psi.rh
The right-hand end
.RC phi.lh
The left-hand end of the support of the scale function (father wavelet)
.RC phi.rh
The right-hand end
.DT
To draw a wavelet it is important to know its support. This function
provides this information. If a new family of wavelets is added then their
support needs to be determined and this function modified.
This function needn't be called by the user in normal use.
If the wavelets aren't compactly supported then the support will not
be a simple closed interval!
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SH REFERENCES
Daubechies, I. (1988)
.ul
Orthonormal bases of compactly supported wavelets
Communications on Pure and Applied Mathematics, Vol. 41, 909-996
.sp
.SH BUGS
As the example shows below, when m=0 and n=0 the lh and rh don't show
the mother wavelet's support, but the wavelet above the mother wavelet.
The calling functions allow for this.
.SA
`draw'
.EX
> support()
$lh:
[1] -2
$rh:
[1] 4
$psi.lh:
[1] -1
$psi.rh:
[1] 2
$phi.lh:
[1] 0
$phi.rh:
[1] 3
.KW dplot
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'threshold'" '(802 characters)'
if test -f 'threshold'
then
echo shar: will not over-write existing file "'threshold'"
else
cat << \SHAR_EOF > 'threshold'
.BG
.FN threshold
.TL
Thresholding wavelets - Generic function
.DN
Applies soft or hard thresholding
.GE
`wd', `imwd'.
.CS
threshold(x, ...)
.RA
.AG x
an S-PLUS object.
.OA
.AG ...
methods may have additional arguments.
.PP
.SE
thresholding is applied to the object and a thresholded version is
returned.
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SH REFERENCES
Donoho, D and Johnstone, I.
.ul
Ideal Spatial Adaptation by Wavelet Shrinkage.
.SA
`wd', `wr', `imwd', `imwr', `wd.object', `wr.object', `compress'
.EX
#
# Do a 1D decomposition
#
tdecomp <- wd(test.data)
#
# Threshold it
#
tdecomp.thresh <- threshold(tdecomp)
#
# Reconstruct from the thresholded coefficients
#
trecons <- wr(tdecomp.thresh)
#
# Plot the reconstructed function
#
plot(accessC(trecons))
.KW smooth
.KW nonlinear
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'threshold.imwd'" '(5024 characters)'
if test -f 'threshold.imwd'
then
echo shar: will not over-write existing file "'threshold.imwd'"
else
cat << \SHAR_EOF > 'threshold.imwd'
.BG
.FN threshold.imwd
.TL
Use `threshold()' on an `imwd' object
.DN
Applies hard or soft thresholding to wavelet decomposition object
"imwd".
.CS
threshold.imwd(imwd, levels=3:(wd$nlevels-1), type="hard",
policy="universal", by.level=F, value=0, dev=var,
verbose=F, return.threshold=F, compression=T)
.RA
.AG imwd
Object of class "imwd", maybe from a wavelet decomposition using
the imwd function.
.OA
.AG levels
vector that determines which levels are thresholded in the decomposition
.AG type
Determines the type of thresholding, this can be "hard" or "soft"
.AG policy
which threshold to use, this can be "universal", "manual", or "probability"
.AG by.level
If F then a global threshold is applied to all the levels specified by
"levels", otherwise a threshold is computed and applied separately to
each level.
.AG value
This is the user-supplied threshold for the manual policy, and
also the user-supplied quantile level for the probability policy.
.AG dev
Deviance measure. The default is to use the var() function, but you
can insert your own measure of deviance.
.AG verbose
If true then threshold spurts informative messages at you.
.AG return.threshold
If this is true then the actual threshold is returned, otherwise
the thresholded object is returned.
.AG compression
If true the thresholded object is compressed and then returned,
otherwise the thresholded object is returned.
.RT
If compression==T then an object of class "imwdc" that has been thresholded
and compressed is returned, otherwise an uncompressed, but thresholded,
object of class "imwd" is returned.
.SE
None
.DT
Thresholding modifies the coefficients within an imwd wavelet
decomposition object. The modification can be performed either
with a "hard" or "soft" thresholding selected by the "type" argument.
Hard thresholding simply compares a coefficient with a threshold.
If it is larger in absolute magnitude it is left alone, if it is
smaller it is set to zero.
The "soft threshold" formula is
.sp
soft(w) = sgn(w)*max(|w| - t, 0)
.sp
where "w" is the wavelet coefficient, "t" is the threshold and
"sgn" is the sign of w. Soft thresholding causes w to be replaced
by soft(w).
There are many ways that the threshold can be computed,
we term this the "policy". A universal policy computes
a threshold based on Donoho and Johnstone's "universal thresholds".
The threshold is sqrt(2*log(n))*noise, where noise is computed
as sqrt(dev(w)), i.e. some measure of the variability of the coefficients,
and n is the number of data points (or number of wavelet coefficients).
By default "dev" is "var", so the noise level is estimated using the
sample standard deviation. You can use any other such estimate by writing
your own function and supplying it as the "dev" argument. For example
you could create the function "myvar" by
.sp
myvar <- function(d)
mad(d)^2
.sp
This computes the square of the mean absolute deviation of the data.
It is squared because "dev" should be a measure on the variance scale,
not the standard deviation (you know what I mean).
If you make the "by.levels" argument T, then a separate threshold is
computed for each level in the "levels" vector. This means that
the variance is estimated separately for each level.
The "manual" policy is simple. You supply a threshold value ("value")
and hard or soft thresholding is performed using that value.
The "value" argument is a vector. If it is of length 1 then
it is replicated to be the same length as the "levels" vector,
otherwise it is repeated as many times as necessary to be the
"levels" vector's length. In this way, different thresholds can be
given for different levels. Note that the "by.level" argument has
no effect with this policy.
The "probability" policy works as follows. All coefficients that
are smaller than the "value"th quantile of the coefficients are
set to zero. If "by.level" is false, then the quantile is computed
for all coefficients in the levels specified by the "levels" vector;
if "by.level" is true, then each level's quantile is estimated separately.
The thresholding process forces many coefficients to zero. From release 2.2
onwards the thresholded "imwd" object is compressed by "compress"
and returned as a much smaller "imwdc" object. An "imwdc" object is
easily converted into an "imwd" object by "uncompress", but all relevant
functions will handle the "imwdc" object.
Note that the coefficients for the horizontal, diagonal and vertical
detail at a particular level are treated as one. In future releases
we may add the capability to treat the details differently, although
this would increase the complexity of the argument specification.
.ME threshold imwd
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SH REFERENCES
Donoho, D and Johnstone, I.
.sp
Ideal Spatial Adaptation by Wavelet Shrinkage.
.SH BUGS
There should be an optimal policy as well, although universal comes
close.
.SA
`imwr', `imwd', `compress', `uncompress', `imwd.object', `imwdc.object'
.EX
See example for threshold
.KW nonlinear
.KW smooth
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'threshold.imwdc'" '(1302 characters)'
if test -f 'threshold.imwdc'
then
echo shar: will not over-write existing file "'threshold.imwdc'"
else
cat << \SHAR_EOF > 'threshold.imwdc'
.BG
.FN threshold.imwdc
.TL
Use `threshold()' on an `imwdc' object
.DN
Applies hard or soft thresholding to wavelet decomposition object
"imwdc".
.CS
threshold.imwdc(imwdc, verbose=F, ...)
.RA
.AG imwdc
Object of class "imwdc", maybe after a previous thresholding.
.OA
.AG verbose
print information messages about what's going on
.AG ...
other arguments to threshold
.RT
An object of class "imwdc", containing the thresholded object
.DT
This function thresholds just like ``threshold.imwd'', except that the
input is an "imwdc" object, not an "imwd" object, which suggests that
the object has already been thresholded (because threshold.imwd
returns an "imwdc" object). Because the object is likely to have
been thresholded a warning message stating this is printed.
However, it is entirely possible that you would wish to impose a higher
threshold on an already thresholded object, and so this function does
just this.
.ME threshold imwdc
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SH REFERENCES
Donoho, D and Johnstone, I.
.sp
Ideal Spatial Adaptation by Wavelet Shrinkage.
.SH BUGS
There should be an optimal policy as well, although universal comes
close.
.SA
`imwr', `imwd', `compress', `uncompress', `imwd.object', `imwdc.object'
.EX
See example for threshold
.KW nonlinear
.KW smooth
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'threshold.wd'" '(4211 characters)'
if test -f 'threshold.wd'
then
echo shar: will not over-write existing file "'threshold.wd'"
else
cat << \SHAR_EOF > 'threshold.wd'
.BG
.FN threshold.wd
.TL
Use `threshold()' on a `wd' object
.DN
Applies hard or soft thresholding to wavelet decomposition object
"wd.object".
.CS
threshold.wd(wd, levels=3:(wd$nlevels-1), type="hard",
policy="universal", by.level=F, value=0, dev=var, boundary=F,
verbose=F, return.threshold=F)
.RA
.AG wd
Object of class "wd", maybe from a wavelet decomposition using
the wd function.
.OA
.AG levels
vector that determines which levels are thresholded in the decomposition
.AG type
Determines the type of thresholding, this can be "hard" or "soft"
.AG policy
which threshold to use, this can be "universal", "manual", or "probability"
.AG by.level
If F then a global threshold is applied to all the levels specified by
"levels", otherwise a threshold is computed and applied separately to
each level.
.AG value
This is the user-supplied threshold for the manual policy, and
also the user-supplied quantile level for the probability policy.
.AG dev
Deviance measure. The default is to use the var() function, but you
can insert your own measure of deviance.
.AG boundary
If this is true then the boundary correction values are included for
thresholding, if it is false then they are not.
.AG verbose
If true then threshold spurts informative messages at you.
.AG return.threshold
If this is true then the actual threshold is returned, otherwise
the thresholded object is returned.
.RT
An object of class "wd" that has been thresholded.
.SE
None
.DT
Thresholding modifies the coefficients within a wd wavelet
decomposition object. The modification can be performed either
with a "hard" or "soft" thresholding selected by the "type" argument.
Hard thresholding simply compares a coefficient with a threshold.
If it is larger in absolute magnitude it is left alone, if it is
smaller it is set to zero.
The "soft threshold" formula is
.sp
soft(w) = sgn(w)*max(|w| - t, 0)
.sp
where "w" is the wavelet coefficient, "t" is the threshold and
"sgn" is the sign of w. Soft thresholding causes w to be replaced
by soft(w).
There are many ways that the threshold can be computed,
we term this the "policy". A universal policy computes
a threshold based on Donoho and Johnstone's "universal thresholds".
The threshold is sqrt(2*log(n))*noise, where noise is computed
as sqrt(dev(w)), i.e. some measure of the variability of the coefficients,
and n is the number of data points (or number of wavelet coefficients).
By default "dev" is "var", so the noise level is estimated using the
sample standard deviation. You can use any other such estimate by writing
your own function and supplying it as the "dev" argument. For example
you could create the function "myvar" by
.sp
myvar <- function(d)
mad(d)^2
.sp
This computes the square of the mean absolute deviation of the data.
It is squared because "dev" should be a measure on the variance scale,
not the standard deviation (you know what I mean).
If you make the "by.levels" argument T, then a separate threshold is
computed for each level in the "levels" vector. This means that
the variance is estimated separately for each level.
The "manual" policy is simple. You supply a threshold value ("value")
and hard or soft thresholding is performed using that value.
The "value" argument is a vector. If it is of length 1 then
it is replicated to be the same length as the "levels" vector,
otherwise it is repeated as many times as necessary to be the
"levels" vector's length. In this way, different thresholds can be
given for different levels. Note that the "by.level" argument has
no effect with this policy.
The "probability" policy works as follows. All coefficients that
are smaller than the "value"th quantile of the coefficients are
set to zero. If "by.level" is false, then the quantile is computed
for all coefficients in the levels specified by the "levels" vector;
if "by.level" is true, then each level's quantile is estimated separately.
.ME threshold wd
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SH REFERENCES
Donoho, D and Johnstone, I.
.sp
Ideal Spatial Adaptation by Wavelet Shrinkage.
.SH BUGS
There should be an optimal policy as well, although universal comes
close.
.SA
`wr', `wd'
.EX
See example for threshold
.KW nonlinear
.KW smooth
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'uncompress'" '(401 characters)'
if test -f 'uncompress'
then
echo shar: will not over-write existing file "'uncompress'"
else
cat << \SHAR_EOF > 'uncompress'
.BG
.FN uncompress
.TL
Decompression - Generic function (wavelet)
.DN
Decompresses object by padding with zeroes
.GE
`imwdc', `default'
.CS
uncompress(x, ...)
.RA
.AG x
an S-PLUS object
.OA
.AG ...
methods may have additional arguments.
.RT
an uncompressed version of the supplied argument
.DT
See individual help pages for operation
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SA
`compress'
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'uncompress.default'" '(1590 characters)'
if test -f 'uncompress.default'
then
echo shar: will not over-write existing file "'uncompress.default'"
else
cat << \SHAR_EOF > 'uncompress.default'
.BG
.FN uncompress.default
.TL
Decompress a compressed or uncompressed object
.DN
The `compress()' function compresses a sparse vector by removing zeroes.
This function performs the inverse transformation of `compress()'
.CS
uncompress.default(v, verbose=F)
.RA
.AG v
The `compressed' or `uncompressed' object to uncompress.
.OA
.AG verbose
If true then the routine prints informative messages.
.RT
The uncompressed vector.
.DT
See the `compress.default()' help to see what this function does to vectors.
The `uncompress.default()' function takes objects that have been output
from `compress.default()' and restores them to their original values.
If the input to `compress.default()' was a sparse vector, then an object
of class `compressed' would be returned. This class of object (described in
`compressed.object') is a list containing the position and values of
all the non-zero elements of the original vector. The `uncompress.default()'
function reconstitutes the original vector.
Sometimes compression is not worthwhile (i.e. the original vector is not
sparse enough). In this case `compress.default()' returns the original
vector in a list with class `uncompressed'. The `uncompress.default()'
function's task in this case is simple, it only need return the
vector part of the `uncompressed' object.
.SA
`compress'
.EX
#
# Compress a sparse vector
#
> cv <- compress( c(rep(0,100),564) )
#
# Look at the class of cv
#
> class(cv)
[1] "compressed"
#
# Uncompress the vector, (uncompress.default is used)
#
> uncompress(cv)
[1] 0 0 0 0 0 ....
....
....
[91] 0 0 0 ..... 0 0 564
.KW manip
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'uncompress.imwdc'" '(563 characters)'
if test -f 'uncompress.imwdc'
then
echo shar: will not over-write existing file "'uncompress.imwdc'"
else
cat << \SHAR_EOF > 'uncompress.imwdc'
.BG
.FN uncompress.imwdc
.TL
Use `uncompress()' on an `imwdc' object
.DN
Decompresses an imwdc object by padding with zero elements
.CS
uncompress.imwdc(imwdc, verbose=F)
.RA
.AG imwdc
Object to decompress must be of class "imwdc".
.OA
.AG verbose
If this is true then report on compression activity
.RT
An object of type "imwd" representing the decompressed imwdc object.
.DT
This uncompresses an "imwdc" object back into an "imwd" object.
.ME uncompress imwdc
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SA
`uncompress.default', `imwdc.object'
.KW manip
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'uncompressed.object'" '(756 characters)'
if test -f 'uncompressed.object'
then
echo shar: will not over-write existing file "'uncompressed.object'"
else
cat << \SHAR_EOF > 'uncompressed.object'
.BG D
.FN uncompressed.object
.TL
Uncompressed object (wavelet)
.DN
These are objects of classes
.Cs
"uncompressed"
.Ce
They are lists with components representing a vector
.SH GENERATION
By compress.default
.SH METHODS
The `"uncompressed"' class of objects has methods for the following generic
functions:
.br
`uncompress.default'
.SH INHERITANCE
None (yet).
.PP
.SH STRUCTURE
The following components must be included in a legitimate `uncompressed' object.
.RC vector
the original uncompressed vector
.DT
The compress.default function tries to compress a vector, if it can
it produces an object of type "compressed", otherwise it produces
an object of type "uncompressed".
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SA
`compress'.
.KW classes
.WR
SHAR_EOF
fi # end of overwriting check
echo shar: extracting "'wd'" '(6011 characters)'
if test -f 'wd'
then
echo shar: will not over-write existing file "'wd'"
else
cat << \SHAR_EOF > 'wd'
.BG
.FN wd
.TL
Discrete wavelet transform (decomposition).
.DN
This function performs the decomposition stage of Mallat's pyramid algorithm
(Mallat 1989),
i.e. the discrete wavelet transform. The actual transform is performed
by some C code, this is dynamically linked into S (if your machine can
do this, if it can't then tough!).
.CS
wd(data, filter.number=2, family="DaubExPhase", bc="periodic",
verbose=F)
.RA
.AG data
A vector containing the data you wish to decompose. The length of this
vector must be a power of 2.
.OA
.AG filter.number
This selects the wavelet that you want to use in the decomposition.
By default this is 2, the Daubechies orthonormal compactly supported
wavelet N=2 (Daubechies (1988)), extremal phase family.
.AG family
specifies the family of wavelets that you want to use. The options are
"DaubExPhase" and "DaubLeAsymm". The "DaubExPhase" wavelets were provided
with previous versions of wavethresh.
.AG bc
specifies the boundary handling. If bc=="periodic" the default, then the
function you decompose is assumed to be periodic on its interval of definition,
if bc=="symmetric" then the function beyond its boundaries is assumed to
be a symmetric reflection of the function in the boundary. The symmetric
option was the implicit default in releases previous to 2.2.
.AG verbose
Controls the printing of "informative" messages whilst the computations
progress. Such messages are generally annoying so it is turned off by default.
.RT
An object of class `wd'. See "wd.object" for details.
Basically, this object is a list with the following components
.RC C
Vector of sets of successively smoothed data. The pyramid structure of Mallat
is stacked so that it fits into a vector. The function `accessC'
should be used to extract a set for a particular level.
.RC D
Vector of sets of wavelet coefficients at different resolution levels.
Again, Mallat's pyramid structure is stacked into a vector.
The function `accessD' should be used to extract the coefficients
for a particular resolution level.
.RC nlevels
The number of resolution levels. This depends on the length of the data
vector. If length(data)=2^m, then there will be m resolution levels.
This means there will be m levels of wavelet coefficients (indexed
0,1,2,...,(m-1)), and m+1 levels of smoothed data (indexed 0,1,2,...,m).
.RC fl.dbase
There is more information stored in the C and D than is described above.
In the decomposition ``extra'' coefficients are generated that help take
care of the boundary effects, this database lists where these start and
finish, so the "true" data can be extracted.
.RC filter
A list containing information about the filter
.RC bc
How the boundaries were handled.
.SE
The appropriate C object code is loaded if it isn't already.
.DT
The code implements Mallat's pyramid algorithm (Mallat 1989).
Essentially it works like this: you start off with some data cm,
which is a real vector of length 2^m, say.
Then from this you obtain two vectors of length 2^(m-1). One of these
is a set of smoothed data, c(m-1), say. This looks like a smoothed version
of cm. The other is a vector, d(m-1), say.
This corresponds to the detail removed
in smoothing cm to c(m-1). More precisely, they are the coefficients of the
wavelet expansion corresponding to the highest resolution wavelets in the
expansion. Similarly, c(m-2) and d(m-2) are obtained from c(m-1), etc. until
you reach c0 and d0.
All levels of smoothed data are stacked into a single vector for memory
efficiency and ease of transport across the SPlus-C interface.
The smoothing is performed directly by convolution with the wavelet filter
(filter.select(n)$H, essentially low-pass filtering), and then dyadic
decimation (selecting every other datum, see Vaidyanathan (1990)).
The detail extraction is performed by the mirror filter of H, which
we call G and is a bandpass filter.
G and H are also known as quadrature mirror filters.
There are now two methods of handling "boundary problems". If you know
that your function is periodic (on its interval) then use the
bc="periodic" option, if you think that the function is symmetric
reflection about each boundary then use bc="symmetric".
If you don't know then it is wise to experiment with both methods,
in any case, if you don't have very much data don't infer too much about
your decomposition! If you have loads of data then don't infer too much
about the boundaries. It can be easier to interpret the wavelet coefficients
from a bc="periodic" decomposition, so that is now the default.
Numerical Recipes implements some of the wavelets code, in particular
we have compared our code to "wt1" and "daub4" on page 595. We are
pleased to announce that our code gives the same answers!
The only difference that you might notice is that one of the coefficients,
at the beginning or end of the decomposition,
always appears in the "wrong" place. This is not an error: when you assume
periodic boundaries you can imagine the function defined on a circle
and you can basically place the coefficient at the beginning or the
end (because there is no beginning or end, as it were).
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SH REFERENCES
Any book on wavelets, especially
.sp
Chui, C. K. (1992)
.ul
An Introduction to Wavelets.
Academic Press, London.
.sp
Daubechies, I. (1988)
.ul
Orthonormal bases of compactly supported wavelets
Communications on Pure and Applied Mathematics, Vol. 41, 909-996
.sp
Mallat, S. G. (1989)
.ul
A theory for multiresolution signal decomposition: the wavelet representation
IEEE Transactions on Pattern Analysis and Machine Intelligence.
Vol. 11, Number 7 674-693.
.sp
Vaidyanathan, P. P. (1990)
.ul
Multirate digital filters, filter banks, polyphase networks and applications:
a tutorial.
Proceedings of the IEEE, Vol. 78, Number 1, 56-93
.SA
`wr', `accessC', `accessD', `filter.select', `plot', `dyn.load'
`threshold'
.EX
#
# Decompose test.data and plot the wavelet coefficients
#
> wds <- wd(test.data)
> plot(wds)
.KW math
.KW smooth
.KW nonlinear
.WR
SHAR_EOF
fi # end of overwriting check
# Extract the nroff help page 'wd.object', which documents the structure
# of a one-dimensional wavelet decomposition ("wd") object.
echo shar: extracting "'wd.object'" '(2358 characters)'
if test -f 'wd.object'
then
echo shar: will not over-write existing file "'wd.object'"
else
# Quoted delimiter (\SHAR_EOF): the here-document below is written out
# verbatim, with no parameter or command expansion.
cat << \SHAR_EOF > 'wd.object'
.BG D
.FN wd.object
.TL
Wavelet decomposition object (1D)
.DN
These are objects of classes
.Cs
"wd"
.Ce
They represent a decomposition of a function with respect to
a wavelet basis.
.SH GENERATION
This class of objects is returned from the `wd' function
to represent a wavelet decomposition of a function.
Other functions also return a `wd.object'.
.SH METHODS
The `"wd"' class of objects has methods for the following generic
functions:
.br
`plot', `threshold', `summary', `print', `draw'.
.SH INHERITANCE
None (yet).
.PP
.SH STRUCTURE
The following components must be included in a legitimate `wd' object.
.RC C
a vector containing each level's smoothed data.
The wavelet transform works by applying both a smoothing filter
and a bandpass filter to the previous level's smoothed data.
The top level contains data at the highest resolution level.
Each of these levels is stored one after the other in this vector.
The matrix `fl.dbase$first.last.c' determines exactly where each
level is stored in the vector.
.RC D
wavelet coefficients.
If you were to write down the discrete wavelet transform of a function
then these D would be the coefficients of the wavelet basis functions.
Like the C, they are also formed in a pyramidal manner, but stored
in a linear array. The storage details are to be found in
`fl.dbase$first.last.d'.
.RC nlevels
The number of levels in the pyramidal decomposition that produces
the coefficients. If you raise 2 to the power of nlevels you get the
number of data points used in the decomposition.
.RC fl.dbase
The first last database associated with this decomposition.
This is a list consisting of 2 integers, and 2 matrices. The matrices
detail how the coefficients are stored in the C and D components
of the `wd.object'. See the help on "first.last" for more information.
.RC filter
a list containing the details of the filter that did the decomposition
.RC bc
how the boundaries were handled
.DT
To retain your sanity the C and D coefficients should be extracted
by the accessC and accessD functions and put using
the putC and putD functions, rather than
by the `$' operator.
Mind you, if you want to muck about with coefficients directly,
then you'll have to do it yourself by working out what the fl.dbase
list means.
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SA
`wd'.
.KW classes
.KW methods
.KW smooth
.WR
SHAR_EOF
fi # end of overwriting check
# Extract the nroff help page 'wr' (inverse discrete wavelet transform).
echo shar: extracting "'wr'" '(2866 characters)'
if test -f 'wr'
then
echo shar: will not over-write existing file "'wr'"
else
# Quoted delimiter (\SHAR_EOF): the here-document below is written out
# verbatim, with no parameter or command expansion.
cat << \SHAR_EOF > 'wr'
.BG
.FN wr
.TL
Discrete wavelet transform (reconstruction).
.DN
This function performs the reconstruction stage of Mallat's pyramid algorithm
(Mallat 1989),
i.e. the discrete inverse wavelet transform. The actual transform is performed
by some C code, this is dynamically linked into S (if your machine can
do this).
.CS
wr(wd.object, start.level=0, verbose=F, return.object=F)
.RA
.AG wd.object
A wavelet decomposition object as returned by `wd', and described in
the help for that function and the help for "wd.object".
.OA
.AG start.level
The level you wish to start reconstruction at. This is usually the first
(level 0).
.AG verbose
Controls the printing of "informative" messages whilst the computations
progress. Such messages are generally annoying so it is turned off by default.
.AG return.object
If this is F then the top level of the reconstruction is returned
(this is the reconstructed function at the highest resolution).
Otherwise if it is T the whole wd reconstructed object is returned.
.RT
Either a vector containing the top level reconstruction or
an object of class "wd" containing the results of the reconstruction,
details to be found in help for "wd.object".
.SE
The appropriate C object code is loaded if it isn't already.
.DT
The code implements Mallat's pyramid algorithm (Mallat 1989).
In the reconstruction the quadrature mirror filters G and H are
supplied with c0 and d0, d1, ... d(m-1) (the wavelet coefficients)
and rebuild c1,..., cm.
If wd.object was obtained directly from wd then the original function
can be reconstructed EXACTLY as cm and can be sought with
> accessC(wd.object, level=wd.object$nlevels)
Usually, the wd.object has been modified in some way, for example,
some coefficients set to zero by threshold. Wr then reconstructs
the function with that set of wavelet coefficients.
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SH REFERENCES
Any book on wavelets, especially
.sp
Chui, C. K. (1992)
.ul
An Introduction to Wavelets.
Academic Press, London.
.sp
Daubechies, I. (1988)
.ul
Orthonormal bases of compactly supported wavelets
Communications on Pure and Applied Mathematics, Vol. 41, 909-996
.sp
Mallat, S. G. (1989)
.ul
A theory for multiresolution signal decomposition: the wavelet representation
IEEE Transactions on Pattern Analysis and Machine Intelligence.
Vol. 11, Number 7 674-693.
.SA
`wd', `accessC', `accessD', `filter.select', `plot', `dyn.load'
`threshold'
.EX
#
# Decompose and then exactly reconstruct test.data
#
> tdecomp <- wd(test.data)
> trecons <- wr(tdecomp)
> reconstructed <- accessC(trecons, level=trecons$nlevels)
#
# Look at accuracy of reconstruction
> max(abs(reconstructed - test.data))
[1] 1.481482e-12
#
# Reconstruct a hard.thresholded object, look at the wavelet coefficients
#
> trecons <- wr(threshold(tdecomp, type="hard") )
> plot(trecons)
.KW math
.KW smooth
.KW nonlinear
.WR
SHAR_EOF
fi # end of overwriting check
# Extract the nroff help page 'wvrelease', which documents the function
# that prints the wavelet software's version banner.
echo shar: extracting "'wvrelease'" '(760 characters)'
if test -f 'wvrelease'
then
echo shar: will not over-write existing file "'wvrelease'"
else
# Quoted delimiter (\SHAR_EOF): the help text below is written verbatim,
# with no parameter or command expansion.
cat << \SHAR_EOF > 'wvrelease'
.BG
.FN wvrelease
.TL
Identify version of wavelet software.
.DN
Prints a message identifying the version of the wavelet software.
.CS
wvrelease()
.RT
None
.SE
Prints a message identifying the version of the wavelet software.
.DT
When attaching the wavelet software directory to your own data directory
it is useful to put a "wvrelease()" function call just after the
attach. This ensures that you have attached to the correct directory
and that you are using the most up to date software.
(This function is mainly useful to those of you that have received
earlier versions of this software).
.SH RELEASE
Release 2.2
Copyright Guy Nason 1993
.SA
.EX
> wvrelease()
S wavelet software, release 2.2, installed
Copyright Guy Nason 1993
.KW smooth
.KW nonlinear
.WR
SHAR_EOF
fi # end of overwriting check
# Extract the help index '.Help.find.sum' used by help.findsum();
# each line is: name~~~one-line title~~~keyword list.
echo shar: extracting "'.Help.find.sum'" '(3039 characters)'
if test -f '.Help.find.sum'
then
echo shar: will not over-write existing file "'.Help.find.sum'"
else
# Quoted delimiter (\SHAR_EOF): the index below is written out verbatim.
cat << \SHAR_EOF > '.Help.find.sum'
accessC~~~Get smoothed data from wavelet structure.~~~manip
accessD~~~Get wavelet expansion coefficients from wavelet structure.~~~manip
bwimage.colors~~~Sets up X11 colours to correspond to PostScript image prints (wavelet).~~~sysdata
compress.default~~~Compress a vector (wavelet)~~~manip
compress.imwd~~~Use `compress()' on an `imwd' object~~~manip
compressed.object~~~Compressed object (wavelet)~~~classes
compress~~~Compression - Generic function (wavelet)~~~
dof~~~Compute degrees of freedom (wavelet)~~~models
draw.default~~~Draw a picture of a wavelet~~~hplot
draw.imwdc~~~Use `draw()' on an `imwdc' object~~~hplot
draw.imwd~~~Use `draw()' on an `imwd' object~~~hplot
draw.wd~~~Use `draw()' on a `wd' object~~~hplot
draw~~~Draw - Generic wavelet function~~~hplot
filter.select~~~Provide wavelet filter coefficients.~~~math smooth
first.last~~~Build a first/last database for wavelet transforms.~~~misc utilities
imwd.object~~~Wavelet decomposition object (2D).~~~classes methods smooth
imwdc.object~~~Compressed wavelet decomposition object (2D).~~~classes methods smooth
imwd~~~Discrete wavelet transform for images (decomposition)~~~smooth nonlinear
imwr.imwdc~~~Use `imwr()' on an `imwdc' object~~~nonlinear smooth
imwr.imwd~~~Discrete wavelet transform for images (reconstruction)~~~nonlinear smooth
imwr~~~imwr - Generic wavelet function~~~nonlinear smooth
lt.to.name~~~Convert notations (wavelet)~~~
maybe.load~~~Load wavethresh object code if it's not loaded (wavelet).~~~
plot.imwdc~~~Use `plot()' on an `imwdc' object~~~hplot
plot.imwd~~~Use `plot()' on an `imwd' object~~~hplot smooth nonlinear
plot.wd~~~Use `plot()' on a `wd' object~~~hplot smooth nonlinear
print.imwdc~~~Use `print()' on an `imwdc' object~~~
print.imwd~~~Use `print()' on an `imwd' object~~~
print.wd~~~Use `print()' on a `wd' object~~~
putC~~~Put smoothed data into wavelet structure~~~manip
putD~~~Put wavelet coefficients into wavelet structure~~~manip
summary.imwdc~~~Use `summary()' on an `imwdc' object~~~nonlinear smooth
summary.imwd~~~Use `summary()' on an `imwd' object~~~nonlinear smooth
summary.wd~~~Use `summary()' on a `wd' object~~~nonlinear smooth
support~~~Compute support of wavelet~~~dplot
threshold.imwdc~~~Use `threshold()' on an `imwdc' object~~~nonlinear smooth
threshold.imwd~~~Use `threshold()' on an `imwd' object~~~nonlinear smooth
threshold.wd~~~Use `threshold()' on a `wd' object~~~nonlinear smooth
threshold~~~Thresholding wavelets - Generic function~~~smooth nonlinear
uncompress.default~~~Decompress a compressed or uncompressed object~~~manip
uncompress.imwdc~~~Use `uncompress()' on an `imwdc' object~~~manip
uncompressed.object~~~Uncompressed object (wavelet)~~~classes
uncompress~~~Decompression - Generic function (wavelet)~~~
wd.object~~~Wavelet decomposition object (1D)~~~classes methods smooth
wd~~~Discrete wavelet transform (decomposition).~~~math smooth nonlinear
wr~~~Discrete wavelet transform (reconstruction).~~~math smooth nonlinear
wvrelease~~~Identify version of wavelet software.~~~smooth nonlinear
SHAR_EOF
fi # end of overwriting check
# All .Help files extracted; move back up to the enclosing directory.
echo shar: done with directory "'.Help'"
cd ..
# Extract the S-PLUS session audit trail '.Audit' recorded while the
# distribution's .Data directory was prepared.  The here-document is a
# literal log; its "#~..." lines are data, not shell comments.
echo shar: extracting "'.Audit'" '(2408 characters)'
if test -f '.Audit'
then
echo shar: will not over-write existing file "'.Audit'"
else
# Quoted delimiter (\SHAR_EOF) keeps the log verbatim, with no
# parameter or command expansion.
cat << \SHAR_EOF > '.Audit'
#~New session: Version: S Wed Oct 21 14:16:16 PDT 1992, Time: Wed Jul 7 14:24:34 1993
#~get "/usr/local/SPlus3.1.1/splus/.Datasets/..min.script.id" 719540429 ...
#~get "/usr/local/SPlus3.1.1/splus/.Datasets/.Copyright" 719540429 ...
#~get "/usr/local/SPlus3.1.1/splus/.Datasets/version" 719540430 ...
#~
maybe.load <- function()
if(!is.loaded(C.symbol("wavedecomp"))) dyn.load("YOURDIR/wd2d.o")
#~put ".Data/maybe.load" 742051403 ...
#~
dump(list = ls(), file = "Source")
#~get ".Data/bwimage.colors" 742051359 ...
#~get ".Data/last.dump" 742051364 ...
#~get ".Data/wdobjectdir" 742051368 ...
#~get ".Data/wdobjectname" 742051368 ...
#~put ".Data/.Last.value" 742051413 ...
#~
remove(ls())
#~remove ".Data/.First" 0 ...
#~remove ".Data/.Last.value" 0 ...
#~remove ".Data/accessC" 0 ...
#~remove ".Data/accessD" 0 ...
#~remove ".Data/bwimage.colors" 0 ...
#~remove ".Data/compress" 0 ...
#~remove ".Data/compress.default" 0 ...
#~remove ".Data/compress.imwd" 0 ...
#~remove ".Data/dof" 0 ...
#~remove ".Data/draw" 0 ...
#~remove ".Data/draw.default" 0 ...
#~remove ".Data/draw.imwd" 0 ...
#~remove ".Data/draw.imwdc" 0 ...
#~remove ".Data/draw.wd" 0 ...
#~remove ".Data/example.1" 0 ...
#~remove ".Data/filter.select" 0 ...
#~remove ".Data/first.last" 0 ...
#~remove ".Data/imwd" 0 ...
#~remove ".Data/imwr" 0 ...
#~remove ".Data/imwr.imwd" 0 ...
#~remove ".Data/imwr.imwdc" 0 ...
#~remove ".Data/last.dump" 0 ...
#~remove ".Data/lt.to.name" 0 ...
#~remove ".Data/maybe.load" 0 ...
#~remove ".Data/plot.imwd" 0 ...
#~remove ".Data/plot.imwdc" 0 ...
#~remove ".Data/plot.wd" 0 ...
#~remove ".Data/print.imwd" 0 ...
#~remove ".Data/print.imwdc" 0 ...
#~remove ".Data/print.wd" 0 ...
#~remove ".Data/putC" 0 ...
#~remove ".Data/putD" 0 ...
#~remove ".Data/summary.imwd" 0 ...
#~remove ".Data/summary.imwdc" 0 ...
#~remove ".Data/summary.wd" 0 ...
#~remove ".Data/support" 0 ...
#~remove ".Data/threshold" 0 ...
#~remove ".Data/threshold.imwd" 0 ...
#~remove ".Data/threshold.imwdc" 0 ...
#~remove ".Data/threshold.wd" 0 ...
#~remove ".Data/uncompress" 0 ...
#~remove ".Data/uncompress.default" 0 ...
#~remove ".Data/uncompress.imwdc" 0 ...
#~remove ".Data/wd" 0 ...
#~remove ".Data/wdobjectdir" 0 ...
#~remove ".Data/wdobjectname" 0 ...
#~remove ".Data/wr" 0 ...
#~remove ".Data/wvrelease" 0 ...
#~
help.findsum(".Data")
#~put ".Data/.Last.value" 742051436 ...
#~
#~End session: Time: 742051516; Process: 14306
SHAR_EOF
fi # end of overwriting check
# All files extracted; climb back out of the directory tree created by
# this archive and terminate successfully.
echo shar: done with directory "'.Data'"
cd ..
echo shar: done with directory "'DISTRIB2.2'"
cd ..
# End of shell archive
exit 0