### $Id: nls.R,v 1.13 2001/05/29 10:06:34 maechler Exp $
###
###            Nonlinear least squares for R
###
### Copyright 1999-1999 Saikat DebRoy <saikat$stat.wisc.edu>,
###                     Douglas M. Bates <bates$stat.wisc.edu>,
###                     Jose C. Pinheiro <jcp$research.bell-labs.com>
###
### This file is part of the nls library for R and related languages.
### It is made available under the terms of the GNU General Public
### License, version 2, or at your option, any later version,
### incorporated herein by reference.
###
### This program is distributed in the hope that it will be
### useful, but WITHOUT ANY WARRANTY; without even the implied
### warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
### PURPOSE.  See the GNU General Public License for more
### details.
###
### You should have received a copy of the GNU General Public
### License along with this program; if not, write to the Free
### Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
### MA 02111-1307, USA

### Force loading of the nls dynamic library in R
## Load the package's compiled code ("nls.so"/"nls.dll") when it is attached.
.First.lib <- function(lib, pkg) {
    library.dynam("nls", pkg, lib)
}

## Evaluate `expr` in environment `rho` and attach a numerically-computed
## "gradient" attribute with respect to the parameters named in `theta`.
## The differentiation itself is done in compiled code.
numericDeriv <- function(expr, theta, rho = parent.frame()) {
    ans <- .Call("numeric_deriv", expr, theta, rho, PACKAGE = "nls")
    dims <- dim(ans)
    if (!is.null(dims)) {
        nd <- length(dims)
        ## Drop a trailing unit dimension of the value, then reshape the
        ## gradient so its leading dimensions match those of the value.
        if (dims[nd] == 1) {
            dims <- dims[-nd]
        }
        if (length(dims) > 1) {
            dim(attr(ans, "gradient")) <-
                c(dims, dim(attr(ans, "gradient"))[-1])
        }
    }
    ans
}

## Model object for the "plinear" algorithm: the formula RHS evaluates to the
## (possibly matrix-valued) regressors for p1 conditionally-linear
## coefficients, which are profiled out by a QR decomposition at every
## evaluation, while the p2 nonlinear parameters in `start' are iterated.
## Returns a list of closures that share this function's environment, so
## state (parameters, residuals, QR factorizations, deviance) is updated in
## place by setPars() and read back through the accessors.
##
## form  - model formula; form[[2]] is the response, form[[3]] the RHS
## data  - list/data.frame of variables referenced in the formula
## start - named starting values for the nonlinear parameters only
nlsModel.plinear <- function( form, data, start ) {
    thisEnv <- environment()
    ## evaluation environment holding the data and the current parameters
    env <- new.env()
    for( i in names( data ) ) {
        assign( i, data[[i]], envir = env )
    }
    ## ind[[nm]] records the positions of parameter nm within the combined
    ## nonlinear parameter vector
    ind <- as.list( start )
    p2 <- 0                             # number of nonlinear parameters
    for( i in names( ind ) ) {
        temp <- start[[ i ]]
        storage.mode( temp ) <- "double"
        assign( i, temp, envir = env )
        ind[[ i ]] <- p2 + seq( along = start[[ i ]] )
        p2 <- p2 + length( start[[ i ]] )
    }
    lhs <- eval( form[[2]], envir = env )
    storage.mode( lhs ) <- "double"
    rhs <- eval( form[[3]], envir = env )
    storage.mode( rhs ) <- "double"
    p1 <- if( is.matrix(rhs) ) { ncol(rhs) } else { 1 }  # linear coefficients
    p <- p1 + p2
    n <- length(lhs)
    fac <- ( n -  p )/p
    cc <- QR.B <- NA
    useParams <- rep(TRUE, p2)
    ## fall back to numeric differentiation when the RHS carries no
    ## analytic "gradient" attribute
    if( is.null( attr( rhs, "gradient" ) ) ) {
        getRHS.noVarying <- function()
          numericDeriv(form[[3]], names(ind), env)
        getRHS <- getRHS.noVarying
        rhs <- getRHS()
    } else {
        getRHS.noVarying <- function()
          eval( form[[3]], envir = env )
        getRHS <- getRHS.noVarying
    }
    dimGrad <- dim(attr(rhs, "gradient"))
    marg <- length(dimGrad)
    ## build the skeleton of a subsetting call on the gradient array; its
    ## last (parameter) index slot is replaced by useParams in setVarying()
    ## to mask out parameters that are held fixed
    if(marg > 0) {
        gradSetArgs <- vector("list", marg+1)
        for(i in 2:marg)
          gradSetArgs[[i]] <- rep(TRUE, dimGrad[i-1])
        useParams <- rep(TRUE, dimGrad[marg])
    } else {
        gradSetArgs <- vector("list", 2)
        useParams <- rep(TRUE, length(attr(rhs, "gradient")))
    }
    ## the unevaluated expression attr(ans, "gradient"), subset by gradCall
    gradSetArgs[[1]] <- (~attr(ans, "gradient"))[[2]]
    gradCall <-
      switch(length(gradSetArgs) - 1,
             call("[", gradSetArgs[[1]], gradSetArgs[[2]]),
             call("[", gradSetArgs[[1]], gradSetArgs[[2]], gradSetArgs[[2]]),
             call("[", gradSetArgs[[1]], gradSetArgs[[2]], gradSetArgs[[2]],
                  gradSetArgs[[3]]),
             call("[", gradSetArgs[[1]], gradSetArgs[[2]], gradSetArgs[[2]],
                  gradSetArgs[[3]], gradSetArgs[[4]]))
    ## evaluate the RHS, then restrict the gradient to varying parameters
    getRHS.varying <- function()
      {
          ans <- getRHS.noVarying()
          attr(ans, "gradient") <- eval(gradCall)
          ans
      }
    ## profile out the linear coefficients: lin minimizes ||lhs - rhs %*% lin||
    QR.rhs <- qr( rhs )
    lin <- qr.coef( QR.rhs, lhs )
    resid <- qr.resid( QR.rhs, lhs )
    topzero <- double( p1 )
    dev <- sum( resid^2 )
    ## ddot/dtdot multiply the gradient array by a vector; the correct form
    ## depends on the array's rank (marg) and on p1/p2
    if( marg <= 1) {
        ddot <- function( A, b ) A %*% b
        dtdot <- function( A, b ) t(A) %*% b
    } else if( marg == 2 ) {
        if( p1 == 1 ) {
            ddot <- function( A, b ) A*b
            dtdot <- function( A, b ) t(b) %*% A
        } else if( p2 == 1 ) {
            ddot <- function( A, b ) A %*% b
            dtdot <- function( A, b ) t(A) %*% b
        }
    } else {
        ddot <- function( A, b ) apply( A, MARGIN = 3, FUN="%*%", b )
        dtdot <- function( A, b ) apply( A, MARGIN = c(2,3), FUN = "%*%", b )
    }

    getPars.noVarying <- function()
      unlist( setNames( lapply( names( ind ), get, envir = env ),
                       names( ind ) ) )
    getPars.varying <- function()
      unlist( setNames( lapply( names( ind ), get, envir = env ),
                       names( ind ) ) )[useParams]
    getPars <- getPars.noVarying

    internalPars <- getPars()
    ## write all parameters back into the evaluation environment
    setPars.noVarying <- function(newPars)
      {
          assign("internalPars", newPars, envir = thisEnv)
          for( i in names( ind ) ) {
              assign( i, clearNames(newPars[ ind[[i]] ]), envir = env )
          }
      }
    ## write back only the currently-varying subset of the parameters
    setPars.varying <- function(newPars)
      {
          internalPars[useParams] <- newPars
          for( i in names( ind ) ) {
              assign( i, clearNames(internalPars[ ind[[i]] ]), envir = env )
          }
      }
    setPars <- setPars.noVarying
    ## fitted values combine the regressor matrix with the linear coefficients
    getPred <-
      if(is.matrix(rhs))
        function(X) as.numeric(X %*% lin)
      else function(X) X * lin

    m <-
      list(resid = function() resid,
           fitted = function() getPred(rhs),
           formula = function() form,
           deviance = function() dev,
           gradient = function() attr( rhs, "gradient" ),
           ## relative-offset convergence criterion, adapted to the
           ## profiled-out linear coefficients; also caches cc and QR.B
           ## for use by incr()
           conv = function() {
               assign("cc", c( topzero, qr.qty( QR.rhs, lhs)[ -(1:p1)]),
                      envir = thisEnv)
               rr <- qr.qy( QR.rhs, cc )
               B <- qr.qty( QR.rhs, ddot( attr( rhs, "gradient"), lin ) )
               B[1:p1, ] <- dtdot( attr( rhs, "gradient" ), rr )
               R <- t( qr.R( QR.rhs )[1:p1, ] )
               if( p1 == 1 ) B[1, ] <- B[1, ]/R
               else B[1:p1, ] <- forwardsolve(R, B[1:p1, ])
               assign( "QR.B", qr(B), envir = thisEnv )
               rr <- qr.qty( QR.B, cc )
               sqrt( fac*sum( rr[1:p1]^2 ) / sum( rr[-(1:p1)]^2 ) )
           },
           ## Gauss-Newton increment for the nonlinear parameters
           incr = function() { qr.solve( QR.B, cc) },
           ## fix/unfix parameters; vary may be names or a logical mask
           setVarying = function(vary = rep(TRUE, length(useParams)))
           {
               assign("useParams", if(is.character(vary)) {
                   temp <- logical(length(useParams))
                   temp[unlist(ind[vary])] <- TRUE
                   temp
               } else if(is.logical(vary) && length(vary) != length(useParams))
                      stop("setVarying : vary length must match length of parameters")
               else {
                   vary
               }, envir = thisEnv)
               gradCall[[length(gradCall)]] <<- useParams
               if(all(useParams)) {
                   assign("setPars", setPars.noVarying, envir = thisEnv)
                   assign("getPars", getPars.noVarying, envir = thisEnv)
                   assign("getRHS", getRHS.noVarying, envir = thisEnv)
               } else {
                   assign("setPars", setPars.varying, envir = thisEnv)
                   assign("getPars", getPars.varying, envir = thisEnv)
                   assign("getRHS", getRHS.varying, envir = thisEnv)
               }
           },
           ## update parameters and refresh rhs/QR.rhs/resid/dev/lin;
           ## returns 1 when the regressor matrix is rank-deficient, else 0
           setPars = function( newPars ) {
               setPars(newPars)
               assign("QR.rhs",
                      qr( assign( "rhs", getRHS(), envir = thisEnv ) ),
                      envir = thisEnv)
               assign( "resid", qr.resid( QR.rhs, lhs ), envir = thisEnv )
               assign("dev", sum( resid^2 ), envir = thisEnv )
               if( QR.rhs$rank < p1 ) {
                   return(1)
               } else {
                   assign( "lin", qr.coef( QR.rhs, lhs ), envir = thisEnv )
                   return(0)
               }
           },
           getPars = function() getPars(),
           ## nonlinear parameters plus the profiled linear ones (".lin")
           getAllPars = function() {
               c( getPars(), c( .lin = lin ) )
           },
           getEnv = function() env,
           trace = function() cat( format( dev ),":",
             format( c( getPars(), lin ) ), "\n" ),
           Rmat = function()
           {
               qr.R( qr( cbind( ddot( attr( rhs, "gradient"), lin ), rhs )))
           },
           ## evaluate the model at new data with the current parameters
           predict = function(newdata = list(), qr = FALSE)
           {
               Env <- new.env()
               for (i in objects(envir = env)) {
                   assign(i, get(i, envir = env), envir = Env)
               }
               newdata <- as.list(newdata)
               for (i in names(newdata)) {
                   assign(i, newdata[[i]], envir = Env)
               }
               getPred(eval(form[[3]], env = Env))
           })
    class(m) <- c("nlsModel.plinear", "nlsModel")
    ## initial conv() call caches cc/QR.B so incr() works immediately
    m$conv()
    ## drop temporaries from the shared environment before returning
    on.exit( remove( data, i, m, marg, n, p, start, temp, gradSetArgs) )
    m
}

## Constructor for the standard (fully nonlinear) nls model object.  Returns
## a list of closures that all share this function's environment, so state
## (parameters, residuals, the QR decomposition of the gradient, deviance)
## is updated in place by setPars() and read back through the accessors.
##
## form  - model formula; form[[2]] is the response, form[[3]] the model
## data  - list/data.frame of variables referenced in the formula
## start - named starting values for the parameters
nlsModel <- function( form, data, start ) {
    thisEnv <- environment()
    ## evaluation environment holding the data and the current parameters
    env <- new.env()
    for( i in names( data ) ) {
        assign( i, data[[i]], envir = env )
    }
    ## ind[[nm]] records the positions of parameter nm within the combined
    ## parameter vector
    ind <- as.list( start )
    parLength <- 0
    for( i in names( ind ) ) {
        temp <- start[[ i ]]
        storage.mode( temp ) <- "double"
        assign( i, temp, envir = env )
        ind[[ i ]] <- parLength + seq( along = start[[ i ]] )
        parLength <- parLength + length( start[[ i ]] )
    }
    useParams <- rep(TRUE, parLength)
    lhs <- eval( form[[2]], envir = env )
    rhs <- eval( form[[3]], envir = env )
    resid <- lhs - rhs
    dev <- sum( resid^2 )
    ## fall back to numeric differentiation when the RHS carries no
    ## analytic "gradient" attribute
    if( is.null( attr( rhs, "gradient" ) ) ) {
        getRHS.noVarying <- function()
          numericDeriv(form[[3]], names(ind), env)
        getRHS <- getRHS.noVarying
        rhs <- getRHS()
    } else {
        getRHS.noVarying <- function()
          eval( form[[3]], envir = env )
        getRHS <- getRHS.noVarying
    }
    dimGrad <- dim(attr(rhs, "gradient"))
    marg <- length(dimGrad)
    ## build the skeleton of a subsetting call on the gradient array; its
    ## last (parameter) index slot is replaced by useParams in setVarying()
    ## to mask out parameters that are held fixed
    if(marg > 0) {
        gradSetArgs <- vector("list", marg+1)
        for(i in 2:marg)
          gradSetArgs[[i]] <- rep(TRUE, dimGrad[i-1])
        useParams <- rep(TRUE, dimGrad[marg])
    } else {
        gradSetArgs <- vector("list", 2)
        useParams <- rep(TRUE, length(attr(rhs, "gradient")))
    }
    npar <- length(useParams)
    ## the unevaluated expression attr(ans, "gradient"), subset by gradCall
    gradSetArgs[[1]] <- (~attr(ans, "gradient"))[[2]]
    gradCall <-
      switch(length(gradSetArgs) - 1,
             call("[", gradSetArgs[[1]], gradSetArgs[[2]]),
             call("[", gradSetArgs[[1]], gradSetArgs[[2]], gradSetArgs[[2]]),
             call("[", gradSetArgs[[1]], gradSetArgs[[2]], gradSetArgs[[2]],
                  gradSetArgs[[3]]),
             call("[", gradSetArgs[[1]], gradSetArgs[[2]], gradSetArgs[[2]],
                  gradSetArgs[[3]], gradSetArgs[[4]]))
    ## evaluate the RHS, then restrict the gradient to varying parameters
    getRHS.varying <- function()
      {
          ans <- getRHS.noVarying()
          attr(ans, "gradient") <- eval(gradCall)
          ans
      }
    QR <- qr( attr( rhs, "gradient" ) )
    qrDim <- min( dim( QR$qr ) )
    if( QR$rank < qrDim)
      stop("singular gradient matrix at initial parameter estimates")

    getPars.noVarying <- function()
      unlist( setNames( lapply( names( ind ), get, envir = env ),
                       names( ind ) ) )
    getPars.varying <- function()
      unlist( setNames( lapply( names( ind ), get, envir = env ),
                       names( ind ) ) )[useParams]
    getPars <- getPars.noVarying

    internalPars <- getPars()
    ## write all parameters back into the evaluation environment
    setPars.noVarying <- function(newPars)
      {
          assign("internalPars", newPars, envir = thisEnv)
          for( i in names( ind ) ) {
              assign( i, clearNames(newPars[ ind[[i]] ]), envir = env )
          }
      }
    ## write back only the currently-varying subset of the parameters
    setPars.varying <- function(newPars)
      {
          internalPars[useParams] <- newPars
          for( i in names( ind ) ) {
              assign( i, clearNames(internalPars[ ind[[i]] ]), envir = env )
          }
      }
    setPars <- setPars.noVarying

    ## drop temporaries from the shared environment before returning
    on.exit(remove(i, data, parLength, start, temp))
    m <-
      list(resid = function() resid,
           fitted = function() rhs,
           formula = function() form,
           deviance = function() dev,
           gradient = function() attr( rhs, "gradient" ),
           ## relative-offset convergence criterion
           conv = function()
           {
               rr <- qr.qty( QR, resid ) # rotated residual vector
               sqrt( sum( rr[1:npar]^2 ) / sum( rr[ -(1:npar) ]^2 ) )
           },
           ## Gauss-Newton parameter increment
           incr = function() qr.coef( QR, resid ),
           ## fix/unfix parameters; vary may be names or a logical mask
           setVarying = function(vary = rep(TRUE, length(useParams)))
           {
               assign("useParams", if(is.character(vary)) {
                   temp <- logical(length(useParams))
                   temp[unlist(ind[vary])] <- TRUE
                   temp
               } else if(is.logical(vary) && length(vary) != length(useParams))
                      stop("setVarying : vary length must match length of parameters")
               else {
                   vary
               }, envir = thisEnv)
               gradCall[[length(gradCall)]] <<- useParams
               if(all(useParams)) {
                   assign("setPars", setPars.noVarying, envir = thisEnv)
                   assign("getPars", getPars.noVarying, envir = thisEnv)
                   assign("getRHS", getRHS.noVarying, envir = thisEnv)
                   assign("npar", length(useParams), envir = thisEnv)
               } else {
                   assign("setPars", setPars.varying, envir = thisEnv)
                   assign("getPars", getPars.varying, envir = thisEnv)
                   assign("getRHS", getRHS.varying, envir = thisEnv)
                   assign("npar", length((1:length(useParams))[useParams]),
                          envir = thisEnv)
               }
           },
           ## update parameters and refresh rhs/resid/dev/QR; returns TRUE
           ## when the new gradient matrix is rank-deficient
           setPars = function( newPars )
           {
               setPars(newPars)
               assign( "resid",
                      lhs - assign("rhs", getRHS(), envir = thisEnv ),
                      envir = thisEnv )
               assign( "dev", sum( resid^2), envir = thisEnv)
               assign( "QR", qr( attr( rhs, "gradient")), envir = thisEnv )
               return( QR$rank < min( dim( QR$qr ) ) )  # to catch the singular gradient matrix
           },
           getPars = function() getPars(),
           getAllPars = function() getPars(),
           getEnv = function() env,
           trace = function() cat( format(dev),": ", format( getPars() ), "\n"),
           Rmat = function() qr.R( QR ),
           ## evaluate the model at new data with the current parameters
           predict = function(newdata = list(), qr = FALSE)
           {
               Env <- new.env()
               for (i in objects(envir = env)) {
                   assign(i, get(i, envir = env), envir = Env)
               }
               newdata <- as.list(newdata)
               for (i in names(newdata)) {
                   assign(i, newdata[[i]], envir = Env)
               }
               eval(form[[3]], env = Env)
           })
    class(m) <- "nlsModel"
    m
}

## Iteration control settings for nls().
##   maxiter   - maximum number of iterations (scalar, >= 1)
##   tol       - convergence tolerance for the relative-offset criterion (> 0)
##   minFactor - minimum step factor allowed in the step-halving search (> 0)
## Returns a named list, as consumed by the "nls_iter" C routine.
## Invalid values previously passed through silently and misbehaved only
## deep inside the iteration; they are now rejected up front.
nls.control <- function( maxiter = 50, tol = 0.00001, minFactor = 1/1024 ) {
    stopifnot(is.numeric(maxiter), length(maxiter) == 1, maxiter >= 1,
              is.numeric(tol), length(tol) == 1, tol > 0,
              is.numeric(minFactor), length(minFactor) == 1, minFactor > 0)
    list( maxiter = maxiter, tol = tol, minFactor = minFactor )
}

## Fit a nonlinear regression model by least squares.
##   formula   - response ~ nonlinear expression (a one-sided formula is
##               rewritten as 0 ~ expr)
##   data      - data frame/list/environment in which to evaluate the formula
##   start     - named starting values; may be omitted for selfStart models
##   control   - list overriding elements of nls.control()
##   algorithm - "default" (Gauss-Newton) or "plinear" (partially linear)
##   trace     - print deviance and parameters at each iteration?
##   subset, weights, na.action are passed through to model.frame().
nls <-
  function (formula, data = parent.frame(), start, control,
            algorithm="default", trace = FALSE,
            subset, weights, na.action)
{
    mf <- match.call()             # for creating the model frame
    formula <- as.formula( formula )
    varNames <- all.vars(formula)  # parameter and variable names from formula

    ## adjust a one-sided model formula by using 0 as the response
    if (length(formula) == 2) {
        formula[[3]] <- formula[[2]]
        formula[[2]] <- 0
    }

    ## get names of the parameters from the starting values or selfStart model
    if (missing(start)) {
        if(!is.null(attr(data, "parameters"))) {
            pnames <- names(attr(data, "parameters"))
        } else {
            ## deduce parameter names from the "pnames" attribute of the
            ## selfStart function called on the RHS of the formula
            cll <- formula[[length(formula)]]
            func <- get(as.character(cll[[1]]))
            pnames <-
                as.character(as.list(match.call(func, call = cll))[-1][attr(func,
                                                      "pnames")])
        }
    } else {
        pnames <- names(start)
    }

    ## Heuristics for determining which names in formula represent actual
    ## variables
    ## If it is a parameter it is not a variable (nothing to guess here :-)
    varNames <- varNames[is.na(match(varNames, pnames, nomatch = NA))]
    ## If its length is a multiple of the response or LHS of the formula,
    ## then it is probably a variable
    ## This may fail if evaluation of formula[[2]] fails
    varIndex <- sapply(varNames, function(varName, data, respLength)
                       { length(eval(as.name(varName), data)) %% respLength == 0
                       }, data, length(eval(formula[[2]], data)))

    mf$formula <-                         # replace RHS by linear model formula
      parse( text = paste("~", paste( varNames[varIndex], collapse = "+")))[[1]]

    ## turn the call into a model.frame() call so that subset, weights and
    ## na.action are honoured; arguments nls-specific are dropped first
    mf$start <- mf$control <- mf$algorithm <- mf$trace <- NULL
    mf[[1]] <- as.name("model.frame")
    mf <- as.list(eval(mf, parent.frame()))
    if (missing(start)) {
        start <- getInitial(formula, mf)
    }
    ## variables excluded from the model frame by the length heuristic are
    ## fetched directly from data
    for(var in varNames[!varIndex])
        mf[[var]] <- eval(as.name(var), data)

    m <- switch(algorithm,
                plinear = nlsModel.plinear( formula, mf, start ),
                nlsModel( formula, mf, start ) )
    ctrl <- nls.control()
    if(!missing(control)) {
        control <- as.list(control)
        ctrl[names(control)] <- control
    }
    ## iterate to convergence in compiled code
    nls.out <- list(m = .Call("nls_iter", m, ctrl, trace,
                              PACKAGE = "nls"),
                    data = substitute( data ), call = match.call())
    nls.out$call$control <- ctrl
    nls.out$call$trace <- trace
    class(nls.out) <- "nls"
    nls.out
}

## coef() method for nls fits: all estimated parameters, including the
## conditionally-linear coefficients (".lin") from the "plinear" algorithm.
## First argument renamed x -> object to agree with the coef() generic,
## coef(object, ...), so named calls through the generic match correctly.
coef.nls <- function( object, ... ) object$m$getAllPars()

## Compact print method for nls fits: model formula, data argument,
## parameter estimates and the residual sum-of-squares.
print.nls <- function(x, ...) {
    pars <- x$m$getAllPars()
    rss <- format(x$m$deviance())
    cat("Nonlinear regression model\n")
    cat("  model: ", deparse(formula(x)), "\n")
    cat("   data: ", as.character(x$data), "\n")
    print(pars)
    cat(" residual sum-of-squares: ", rss, "\n")
    invisible(x)
}

## Summary method for nls fits: coefficient table (estimate, std. error,
## t value, p value), residual standard error and parameter correlations.
summary.nls <- function (object, ...)
{
    z <- object   # (was .Alias(object); .Alias is a defunct no-op)
    resid <- resid(z)
    n <- length(resid)
    param <- coef(z)
    pnames <- names(param)
    p <- length(param)
    rdf <- n - p                       # residual degrees of freedom
    r <- resid(z)
    f <- fitted(z)
    R <- z$m$Rmat()                    # R factor of the gradient QR
    w <- weights(z)
    if (!is.null(w)) {
        ## weighted fit: rescale residuals/fitted and drop zero-weight rows
        w <- w^0.5
        resid <- resid * w
        f <- f * w
        excl <- w == 0
        if (any(excl)) {
            warning(paste(sum(excl), "rows with zero weights not counted"))
            r <- r[!excl]
            f <- f[!excl]
            rdf <- rdf - sum(excl)
        }
    }
    rss <- deviance(z)
    ## Residual variance.  Computed unconditionally: the old `if (n > p)'
    ## guard left resvar undefined when n <= p, crashing below with
    ## "object 'resvar' not found"; now such fits yield Inf/NaN instead.
    resvar <- rss/rdf
    R <- chol2inv(R)                   # unscaled covariance matrix
    dimnames(R) <- list(pnames, pnames)
    se <- sqrt(diag(R) * resvar)
    correl <- (R * resvar)/outer(se, se)
    ans <- list(formula = formula(z), residuals = r, sigma = sqrt(resvar),
                df = c(p, rdf), cov.unscaled = R, correlation = correl)
    tval <- param/se
    ## two-sided p values from the t distribution on rdf df
    param <- cbind( param, se, tval, 2 * (1 - pt(abs(tval), rdf)) )
    dimnames(param) <-
      list(pnames, c("Estimate", "Std. Error", "t value", "Pr(>|t|)"))
    ans$parameters <- param
    class(ans) <- "summary.nls"
    ans
}

## Print method for summary.nls objects: model formula, coefficient table,
## residual standard error and (optionally symbolic) parameter correlations.
print.summary.nls <-
  function (x, digits = max(3, getOption("digits") - 3), symbolic.cor = p >
            4, signif.stars = getOption("show.signif.stars"), ...)
{
    cat("\nFormula: ")
    cat(paste(deparse(x$formula), sep = "\n", collapse = "\n"),
        "\n", sep = "")
    df <- x$df
    rdf <- df[2]
    cat("\nParameters:\n")
    ## NOTE(review): print.coefmat was renamed printCoefmat in later R
    ## releases -- confirm against the R version this package targets
    print.coefmat(x$parameters, digits = digits, signif.stars = signif.stars,
                  ...)
    cat("\nResidual standard error:", format(signif(x$sigma,
                                                    digits)), "on", rdf, "degrees of freedom\n")
    correl <- x$correlation
    if (!is.null(correl)) {
        p <- dim(correl)[2]
        if (p > 1) {
            cat("\nCorrelation of Parameter Estimates:\n")
            if (symbolic.cor)
              print(symnum(correl)[-1, -p])
            else {
                ## print the lower triangle only, blanking the rest
                correl[!lower.tri(correl)] <- NA
                print(correl[-1, -p, drop = FALSE], digits = digits, na = "")
            }
        }
    }
    cat("\n")
    invisible(x)
}

## weights() method for nls fits: the prior weights stored on the object,
## or NULL when the fit is unweighted.
weights.nls <- function( object, ... ) {
    object$weights
}

## predict() method for nls fits.  Without newdata the fitted values are
## returned; otherwise the model is evaluated at newdata via the model
## object's own predict closure.  The se.fit/scale/df/interval/level
## arguments are accepted for compatibility but not yet used.
predict.nls <-
  function(object, newdata, se.fit = FALSE, scale = NULL, df = Inf,
           interval = c("none", "confidence", "prediction"), level = 0.95,
           ...)
{
    if (missing(newdata)) {
        as.vector(fitted(object))
    } else {
        object$m$predict(newdata)
    }
}

## fitted() method for nls fits: fitted values as a plain vector, carrying
## a "label" attribute (with response units appended when available).
fitted.nls <- function(object, ...)
{
    val <- as.vector(object$m$fitted())
    lab <- "Fitted values"
    aux <- attr(object, "units")$y
    if (!is.null(aux)) {
        lab <- paste(lab, aux)
    }
    attr(val, "label") <- lab
    val
}

## formula() method for nls fits; retrieves the model formula kept inside
## the nlsModel object.  Signature aligned with the generic formula(x, ...):
## the old definition had no ... argument, so calls through the generic
## with extra arguments (e.g. formula(fit, env = ...)) failed.
formula.nls <- function(x, ...) x$m$formula()

## residuals() method for nls fits.  type = "response" gives raw residuals;
## type = "pearson" divides by the residual standard error estimate.
## The result carries a descriptive "label" attribute.
residuals.nls <- function(object, type = c("response", "pearson"), ...)
{
    type <- match.arg(type)
    val <- as.vector(object$m$resid())
    if (type == "pearson") {
        ## standardize by sqrt(RSS / (n - p))
        std <- sqrt(sum(val^2)/(length(val) - length(coef(object))))
        val <- val/std
        attr(val, "label") <- "Standardized residuals"
        return(val)
    }
    lab <- "Residuals"
    aux <- attr(object, "units")$y
    if (!is.null(aux)) {
        lab <- paste(lab, aux)
    }
    attr(val, "label") <- lab
    val
}

## logLik & AIC -- generic now in base

## logLik() method for nls fits: Gaussian log-likelihood evaluated at the
## ML estimate of the error variance.  The "df" attribute counts the
## estimated coefficients plus one for that variance.
logLik.nls <- function(object, REML = FALSE, ...)
{
    if (REML)
        stop("Cannot calculate REML log-likelihood for nls objects")

    res <- resid(object)
    N <- length(res)
    if(is.null(w <- object$weights)) {
        w <- rep(1, N)
    }
    val <-  -N * (log(2 * pi) + 1 - log(N) - sum(log(w)) + log(sum(w*res^2)))/2
    ## df fix: was length(object[["parameters"]]) + 1, but nls objects carry
    ## no "parameters" component (see nls()), so df was always 1 and AIC/BIC
    ## penalties were wrong; count the fitted coefficients instead.
    attr(val, "df") <- length(coef(object)) + 1
    attr(val, "nobs") <- attr(val, "nall") <- N
    class(val) <- "logLik"
    val
}

## AIC() method for nls fits, computed from the logLik method.
## (Replaces `AIC.nls <- .Alias(AIC.lm)': .Alias is deprecated/defunct in
## later R versions, and the aliased method worked via logLik anyway.)
AIC.nls <- function(object, ..., k = 2) AIC(logLik(object), ..., k = k)

## df.residual() method for nls fits:
## number of observations minus number of estimated parameters.
df.residual.nls <- function(object, ...)
{
    length(resid(object)) - length(coef(object))
}

## deviance() method for nls fits: the residual sum-of-squares maintained
## inside the nlsModel closure.
deviance.nls <- function(object, ...) {
    object$m$deviance()
}


## anova() method for nls fits.  A single model cannot be tested in
## isolation; with two or more models the sequential comparison table is
## built by anovalist.nls().
anova.nls <- function(object, ...)
{
    if (length(list(...)) == 0)
        stop("Anova is only defined for sequences of nls objects")
    anovalist.nls(object, ...)
}

## Build the sequential ANOVA table comparing two or more nls fits.  Models
## whose response differs from that of the first model are dropped with a
## warning.  Each model is F-tested against its predecessor; the direction
## of the comparison depends on the sign of the change in residual df.
anovalist.nls <- function (object, ..., test = NULL)
{
    objects <- list(object, ...)
    responses <- as.character(lapply(objects,
                                     function(x) formula(x)[[2]]))
    sameresp <- responses == responses[1]
    if (!all(sameresp)) {
        objects <- objects[sameresp]
        warning(paste("Models with response",
                      deparse(responses[!sameresp]),
                      "removed because response differs from", "model 1"))
    }
    ## calculate the number of models
    nmodels <- length(objects)
    if (nmodels == 1)
        stop("Anova is only defined for sequences of nls objects")

    models <- as.character(lapply(objects, function(x) formula(x)))

    ## extract statistics
    df.r <- unlist(lapply(objects, df.residual))   # residual df per model
    ss.r <- unlist(lapply(objects, deviance))      # residual SS per model
    df <- c(NA, -diff(df.r))
    ss <- c(NA, -diff(ss.r))
    ms <- ss/df
    f <- p <- rep(NA,nmodels)
    for(i in 2:nmodels) {
        if(df[i] > 0) {
            ## model i is the larger model: test against its own residual MS
            f[i] <- ms[i]/(ss.r[i]/df.r[i])
            p[i] <- 1 - pf(f[i], df[i], df.r[i])
        }
        else if(df[i] < 0) {
            ## model i is the smaller model: test against model i-1
            f[i] <- ms[i]/(ss.r[i-1]/df.r[i-1])
            p[i] <- 1 - pf(f[i], -df[i], df.r[i-1])
        }
        else { # df[i] == 0
          ss[i] <- 0
        }
    }
    table <- data.frame(df.r,ss.r,df,ss,f,p)
    dimnames(table) <- list(1:nmodels, c("Res.Df", "Res.Sum Sq", "Df",
                                         "Sum Sq", "F value", "Pr(>F)"))
    ## construct table and title
    title <- "Analysis of Variance Table\n"
    topnote <- paste("Model ", format(1:nmodels),": ",
                     models, sep="", collapse="\n")

    ## calculate test statistic if needed
    structure(table, heading = c(title, topnote),
              class= c("anova", "data.frame"))# was "tabular"
}
### $Id: nlsFunc.R,v 1.1 1999/11/11 21:09:49 bates Exp $
###
###            Utility functions used with nls
###
### Copyright 1997,1999 Jose C. Pinheiro <jcp$research.bell-labs.com>,
###                     Douglas M. Bates <bates$stat.wisc.edu>
###           1999-1999 Saikat DebRoy <saikat$stat.wisc.edu>
###
### This file is part of the nls library for R and related languages.
### It is made available under the terms of the GNU General Public
### License, version 2, or at your option, any later version,
### incorporated herein by reference.
### 
### This program is distributed in the hope that it will be
### useful, but WITHOUT ANY WARRANTY; without even the implied
### warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
### PURPOSE.  See the GNU General Public License for more
### details.
### 
### You should have received a copy of the GNU General Public
### License along with this program; if not, write to the Free
### Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
### MA 02111-1307, USA

###
### asOneSidedFormula is extracted from the NLME-3.1 library for S 
###

## Convert an expression, name, numeric value or character string into a
## one-sided formula (~expr).  A formula that is already one-sided is
## returned unchanged; a two-sided formula is an error.
## (Extracted from the NLME-3.1 library for S.)
asOneSidedFormula <- function(object)
{
  ## a quoted `~expr' call: evaluate it into an actual formula first
  if (mode(object) == "call" && object[[1]] == "~") {
    object <- eval(object)
  }
  if (inherits(object, "formula")) {
    if (length(object) != 2) {
      stop(paste("Formula", deparse(as.vector(object)),
		 "must be of the form \"~expr.\""))
    }
    return(object)
  }
  md <- mode(object)
  rhs <-
    if (md == "name" || md == "numeric" || md == "call") {
      object
    } else if (md == "character") {
      as.name(object)
    } else if (md == "expression") {
      object[[1]]
    } else {
      stop(paste(substitute(object), "cannot be of mode", md))
    }
  do.call("~", list(rhs))
}

## Return `object` with its names attribute set to `nm`.
setNames <- function( object, nm ) {
  structure(object, names = nm)
}

## Return `object` stripped of its names attribute.
clearNames <- function( object ) {
  attr(object, "names") <- NULL
  object
}
### $Id: profile.R,v 1.6 2000/06/27 22:26:18 pd Exp $
###
### Profiling nonlinear least squares for R
###
### Copyright 1999-1999 Saikat DebRoy <saikat$stat.wisc.edu>,
###                     Douglas M. Bates <bates$stat.wisc.edu>,
###
### This file is part of the nls library for R and related languages.
### It is made available under the terms of the GNU General Public
### License, version 2, or at your option, any later version,
### incorporated herein by reference.
###
### This program is distributed in the hope that it will be
### useful, but WITHOUT ANY WARRANTY; without even the implied
### warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
### PURPOSE.  See the GNU General Public License for more
### details.
###
### You should have received a copy of the GNU General Public
### License along with this program; if not, write to the Free
### Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
### MA 02111-1307, USA

## S3 generic constructing a profiler object; dispatches on the class of
## the fitted model.
profiler <- function(fitted, ...) {
  UseMethod("profiler")
}

## Constructor of a "profiler" object for nls fits: a list of closures
## sharing this function's environment that can hold selected parameters
## fixed, refit the remaining ones, and report the profile F statistic
## relative to the full fit.  Used by profile.nls().
profiler.nls <-
  function(fitted, ...)
{
  fittedModel <- fitted$m
  ctrl <- fitted$call$control
  trace <- fitted$call$trace
  defaultPars <- fittedPars <- fittedModel$getPars()
  defaultVary <- rep(TRUE, length(defaultPars))
  S.hat <- sum(resid(fitted)^2)     # residual sum of squares of the full fit
  s2.hat <- summary(fitted)$sigma^2 # residual variance of the full fit
  thisEnv <- environment()
  on.exit(remove(fitted))
  prof <- list(getFittedPars = function() fittedPars,
               getFittedModel = function() fittedModel,
               ## Set (or, called with no arguments, reset) the default
               ## parameter values and the mask used by getProfile().
               ## `varying' given as numeric indices or names designates the
               ## parameters to hold FIXED; the stored logical defaultVary
               ## is TRUE for parameters that remain free.
               setDefault = function(varying, params)
               {
                 if(missing(params) && missing(varying)) {
                   fittedModel$setVarying()
                   fittedModel$setPars(fittedPars)
                   assign("defaultPars", fittedPars, envir = thisEnv)
                   assign("defaultVary", rep(TRUE, length(defaultPars)),
                                             envir = thisEnv)
                 } else {
                   if(!missing(params)) {
                     if(length(params) != length(fittedPars))
                       stop("params has wrong length")
                     assign("defaultPars", params, envir = thisEnv)
                   }
                   if(!missing(varying)) {
                     if(is.numeric(varying)) {
                       if(!all(varying %in% (1:length(fittedPars))))
                         stop("varying must be in 1:length(pars)")
                       varying <- !((1:length(fittedPars)) %in% varying)
                     } else if(is.logical(varying)) {
                       if(length(varying) != length(fittedPars))
                         stop("varying has wrong length")
                     } else if(is.character(varying)) {
                       if(!all(varying %in% names(fittedPars)))
                         stop("varying must be in 1:length(pars)")
                       varying <- !(names(fittedPars) %in% varying)
                     } else stop("varying must be logical, integer or character")
                     assign("defaultVary", varying, envir = thisEnv)
                   }
                 }
               },
               ## Refit with the fixed parameters at their default (or
               ## supplied) values; returns the profile F statistic, the
               ## refitted parameters and the varying mask used.
               getProfile = function(...)
               {
                 args <- list(...)
                 if(length(args) == 0) {
                   vary <- defaultVary
                   startPars <- defaultPars
                 } else if(length(args) == 2 && is.logical(args[[1]])) {
                   vary <- args[[1]]
                   params <- unlist(args[[2]])
                   startPars <- defaultPars
                   startPars[!vary] <- params
                 } else {
                   if(length(args) == 1 && is.list(args[[1]])) {
                     params <- unlist(args[[1]])
                   } else if(all(sapply(args, is.numeric))) {
                     params <- unlist(args)
                   } else stop("invalid argument to getProfile")
                   if(!all(names(params) %in% names(fittedPars)))
                     stop("can not recognize parameter name")
                   startPars <- defaultPars
                   vary <- !(names(fittedPars) %in% names(params))
                   startPars[!vary] <- params
                 }
                 fittedModel$setVarying()
                 fittedModel$setPars(startPars)
                 fittedModel$setVarying(vary)
                 fittedModel$setPars(startPars[vary])
                 ## iterate the varying parameters to convergence in C code
                 profiledModel <-
                   .Call("nls_iter", fittedModel, ctrl, trace,
                         PACKAGE = "nls")
                 fstat <- (profiledModel$deviance()-S.hat)/s2.hat
                 fittedModel$setVarying()
                 ans <- list(fstat = fstat,
                             parameters = profiledModel$getAllPars(),
                             varying = vary)
                 fittedModel$setPars(defaultPars)
                 ans
               })
  class(prof) <- c("profiler.nls", "profiler")
  prof
}

## Profile the objective of a fitted nls model over each parameter in
## `which'.  Each profiled parameter is stepped away from its estimate
## (downwards, then upwards) in increments based on delta.t standard errors,
## re-optimizing the remaining parameters at each step, until the signed
## square-root F statistic (tau) passes the cutoff implied by alphamax.
profile.nls <-
  function(fitted, which = 1:npar, maxpts = 100, alphamax = 0.01, delta.t =
           cutoff/5)
{
  f.summary <- summary(fitted)
  std.err <- f.summary$parameters[,"Std. Error"]
  npar <- length(std.err)
  nobs <- length(resid(fitted))
  cutoff <- sqrt(npar * qf(1 - alphamax, npar, nobs - npar))
  outmat <- array(0, c(nobs, npar))
  out <- list()
  prof <- profiler(fitted)
  on.exit(prof$setDefault()) # in case there is an abnormal exit
  for(par in which) {
    pars <- prof$getFittedPars()
    prof$setDefault(varying = par)
    sgn <- -1                      # profile downwards first
    count <- 1
    varying <- rep(TRUE, npar)
    varying[par] <- FALSE
    tau <- double(2 * maxpts)
    par.vals <- array(0, c(2 * maxpts, npar), list(NULL, names(pars)))
    tau[1] <- 0
    par.vals[1,  ] <- pars
    base <- pars[par]
    profile.par.inc <- delta.t * std.err[par]
    pars[par] <- pars[par] - profile.par.inc
    while(count <= maxpts) {
      ## stop when the profiled parameter has wandered very far out
      if(abs(pars[par] - base)/std.err[par] > 10 * cutoff)
        break
      count <- count + 1
      prof$setDefault(params = pars)
      ans <- prof$getProfile()
      tau[count] <- sgn*sqrt(ans$fstat)
      par.vals[count, ] <- pars <- ans$parameters
      if(abs(tau[count]) > cutoff)
        break
      ## step size adapted to the local slope of tau
      pars <- pars + ((pars - par.vals[count - 1,  ]) *
                      delta.t)/abs(tau[count] - tau[count - 1])
    }
    ## reverse the downward leg so tau increases with the parameter value
    tau[1:count] <- tau[count:1]
    par.vals[1:count,  ] <- par.vals[count:1,  ]
    sgn <- 1                       # now profile upwards
    newmax <- count + maxpts
    pars <- par.vals[count,  ]
    while(count <= newmax) {
      pars <- pars + ((pars - par.vals[count - 1,  ]) *
                      delta.t)/abs(tau[count] - tau[count - 1])
      if(abs(pars[par] - base)/std.err[par] > 10 * cutoff)
        break
      count <- count + 1
      prof$setDefault(params = pars)
      ans <- prof$getProfile()
      tau[count] <- sgn*sqrt(ans$fstat)
      par.vals[count, ] <- pars <- ans$parameters
      if(abs(tau[count]) > cutoff)
        break
    }
    out[[par]] <- structure(list(tau = tau[1:count], par.vals =
                                 par.vals[1:count,  ]), class = "data.frame",
                            row.names = as.character(1:count),
                            parameters = list(par = par,
                              std.err = std.err[par]))
    prof$setDefault()
  }
  names(out)[which] <- names(coef(fitted))[which]
  attr(out, "original.fit") <- fitted
  attr(out, "summary") <- f.summary
  class(out) <- c("profile.nls", "profile")
  out
}

plot.profile.nls <- function(x, levels, conf = c(99, 95, 90, 80, 50)/100,
                             nseg = 50, absVal = TRUE, ...)
{
  ## Plot the profile t traces of a "profile.nls" object, one panel per
  ## profiled parameter, interpolating the trace with a spline and
  ## marking the requested confidence levels.
  ##
  ## x:      a "profile.nls" object (as produced by profile.nls)
  ## levels: tau cutoffs to mark; derived from `conf` when missing
  ## conf:   confidence levels used to derive `levels`
  ## nseg:   currently unused (kept for interface compatibility)
  ## absVal: plot |tau| (TRUE) or signed tau (FALSE)
  if(!require(splines))  # interpSpline/backSpline live in splines
    stop("plot.profile.nls requires the splines package")
  obj <- x
  dfres <- attr(obj, "summary")$df[2]
  confstr <- NULL
  if(missing(levels)) {
    ## translate confidence levels to the tau scale via the F quantile
    levels <- sqrt(qf(pmax(0, pmin(1, conf)), 1, dfres))
    confstr <- paste(format(100 * conf), "%", sep = "")
  }
  if(any(levels <= 0)) {
    levels <- levels[levels > 0]
    warning("levels truncated to positive values only")
  }
  if(is.null(confstr)) {
    confstr <- paste(format(100 * pf(levels^2, 1, dfres)), "%", sep = "")
  }
  mlev <- max(levels) * 1.05
  nm <- names(obj)
  opar <- par(mar = c(5, 4, 1, 1) + 0.1)
  on.exit(par(opar), add = TRUE) # restore even on an abnormal exit
  if (absVal) {
    ## panels of |tau| against each profiled parameter
    for (i in seq(along = nm)) {
      sp <- interpSpline(obj[[i]]$par.vals[,i], obj[[i]]$tau)
      bsp <- backSpline(sp)
      xlim <- predict(bsp, c(-mlev, mlev))$y
      if (is.na(xlim[1])) xlim[1] <- min(x[[i]]$par.vals[, i])
      if (is.na(xlim[2])) xlim[2] <- max(x[[i]]$par.vals[, i])
      plot( abs(tau) ~ par.vals[, i], data = obj[[i]], xlab = nm[i],
           ylim = c(0, mlev), xlim = xlim, ylab = expression(abs(tau)), type = "n")
      avals <- rbind(as.data.frame(predict(sp)),
                     data.frame(x = obj[[i]]$par.vals[, i], y = obj[[i]]$tau))
      avals$y <- abs(avals$y)
      lines( avals[ order(avals$x), ], col = 4 )
      abline( v = predict(bsp, 0)$y , col = 3, lty = 2 )
      for( lev in  levels ) {
        pred <- predict( bsp, c(-lev, lev) )$y
        ## vertical drop lines plus the horizontal connector
        lines( pred, rep(lev, 2), type = "h", col = 6, lty = 2 )
        lines( pred, rep(lev, 2), type = "l", col = 6, lty = 2 )
      }
    }
  } else {
    ## panels of signed tau against each profiled parameter
    for (i in seq(along = nm)) {
      sp <- interpSpline(obj[[i]]$par.vals[,i], obj[[i]]$tau)
      bsp <- backSpline(sp)
      xlim <- predict(bsp, c(-mlev, mlev))$y
      if (is.na(xlim[1])) xlim[1] <- min(x[[i]]$par.vals[, i])
      if (is.na(xlim[2])) xlim[2] <- max(x[[i]]$par.vals[, i])
      plot( tau ~ par.vals[, i], data = obj[[i]], xlab = nm[i],
           ylim = c(-mlev, mlev), xlim = xlim, ylab = expression(tau), type = "n")
      lines(predict(sp), col = 4)
      abline(h = 0, col = 3, lty = 2 )
      for( lev in  levels ) {
         pred <- predict( bsp, c(-lev, lev) )$y
         lines( pred, c(-lev, lev), type = "h", col = 6, lty = 2 )
      }
    }
  }
  invisible()
}

### $Id: selfStart.R,v 1.3 2001/03/24 06:57:56 ripley Exp $
###
###            self-starting nonlinear regression models
###
### Copyright 1997,1999 Jose C. Pinheiro <jcp$research.bell-labs.com>,
###                     Douglas M. Bates <bates$stat.wisc.edu>
###
### This file is part of the nls library for R and related languages
### and was taken from the nlme library for S.
### It is made available under the terms of the GNU General Public
### License, version 2, or at your option, any later version,
### incorporated herein by reference.
###
### This program is distributed in the hope that it will be
### useful, but WITHOUT ANY WARRANTY; without even the implied
### warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
### PURPOSE.  See the GNU General Public License for more
### details.
###
### You should have received a copy of the GNU General Public
### License along with this program; if not, write to the Free
### Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
### MA 02111-1307, USA

####* Constructors

selfStart <-
    ## Generic constructor for self-starting nonlinear models;
    ## dispatches on `model` (methods exist for functions and formulas).
    function(model, initial, parameters, template) UseMethod("selfStart")

selfStart.default <-
  function(model, initial, parameters, template)
{
    ## Build a "selfStart" model from a plain function: coerce the model
    ## and its initial-value generator to functions and record the
    ## parameter names in the "pnames" attribute.  `template` is part of
    ## the generic's signature but not used by this method.
    fun <- as.function(model)
    attr(fun, "initial") <- as.function(initial)
    attr(fun, "pnames") <- parameters
    class(fun) <- "selfStart"
    fun
}

selfStart.formula <-
    function(model, initial, parameters, template = NULL)
{
    ## Build a "selfStart" model from a one-sided formula.  The model
    ## function (with analytic gradient) is generated by deriv(); the
    ## `initial` function computes starting estimates from data.
    ##
    ## model:      a formula giving the model expression
    ## initial:    function(mCall, data, LHS) returning starting values
    ## parameters: character vector of parameter names in `model`
    ## template:   optional prototype function for deriv(); built from
    ##             the formula's variables when NULL
    if (is.null(template)) {		# create a template if not given
        nm <- all.vars(model)
        if (any(msng <- is.na(match(parameters, nm)))) {
            ## report every missing parameter in one readable message
            ## (a vectorized paste() fed to stop() would be garbled)
            stop(paste("Parameter(s)",
                       paste(parameters[msng], collapse = ", "),
                       "do not occur in the model formula"))
        }
        template <- function() {}
        ## argument order: covariates first, then the parameters
        argNams <- c( nm[ is.na( match(nm, parameters) ) ], parameters )
        args <- rep( alist( a = , b = 3 )[-2], length( argNams ) )
        names(args) <- argNams
        formals(template) <- args
    }
    value <- structure(deriv(model, parameters, template),
                       initial = as.function(initial),
                       pnames = parameters)
    class(value) <- "selfStart"
    value
}

###*# Methods


##*## Generics and methods specific to selfStart models

getInitial <-
    ## Create initial values for object from data
    ## (generic; methods exist for formula, selfStart and default)
    function(object, data, ...) UseMethod("getInitial")

getInitial.formula <-
    function(object, data, ...)
{
    ## Derive initial estimates from a model formula.  If `data` already
    ## carries a "parameters" attribute those values are returned
    ## directly; otherwise dispatch on the (self-starting) function
    ## called on the model side of the formula.
    if(!is.null(attr(data, "parameters"))) {
        return(attr(data, "parameters"))
    }
    #obj <- object                       # kluge to create a copy inside this
    #object[[1]] <- as.name("~")	 # function. match.call() is misbehaving
    ## switch() on the formula length: 1 is impossible, 2 means a
    ## one-sided formula (~ f(...)), 3 a two-sided one (y ~ f(...))
    switch (length(object),
            stop("argument \"object\" has an impossible length"),
        {				# one-sided formula
	    func <- get(as.character(object[[2]][[1]]))
	    getInitial(func, data,
		       mCall = as.list(match.call(func, call = object[[2]])),
                       ...)
        },
        {				# two-sided formula
	    func <- get(as.character(object[[3]][[1]]))
	    getInitial(func, data,
		       mCall = as.list(match.call(func, call = object[[3]])),
		       LHS = object[[2]], ...)
        })
}

getInitial.selfStart <-
    function(object, data, mCall, LHS = NULL, ...)
{
    ## Delegate to the model's own "initial" attribute: a function that
    ## computes starting estimates from the data and the matched call.
    initFun <- attr(object, "initial")
    initFun(mCall = mCall, data = data, LHS = LHS)
}

getInitial.default <-
    function(object, data, mCall, LHS = NULL, ...)
{
    ## Fallback method: either the object is an old-style self-starting
    ## function (no longer supported -- point at the SS* replacements)
    ## or no getInitial method exists for its class.  Both cases error.
    oldStyle <- is.function(object) && !is.null(attr(object, "initial"))
    if (oldStyle) {
        msg <- paste("old-style self-starting model functions\n",
                     "are no longer supported.\n",
                     "New selfStart functions are available.\n",
                     "Use\n",
                     "  SSfpl instead of fpl,\n",
                     "  SSfol instead of first.order.log,\n",
                     "  SSbiexp instead of biexp,\n",
                     "  SSlogis instead of logistic.\n",
                     "If writing your own selfStart model, see\n",
                     "  \"help(selfStart)\"\n",
                     "for the new form of the \"initial\" attribute.", sep="" )
        stop(msg)
    }
    stop(paste("No getInitial method found for", data.class(object), "objects"))
}

sortedXyData <-
    ## Constructor of the sortedXyData class
    ## (generic; dispatches on `x`)
    function(x, y, data) UseMethod("sortedXyData")

sortedXyData.default <-
    function(x, y, data)
{
    ## Build a "sortedXyData" frame: x sorted increasing, y averaged
    ## over replicate x values, NA rows dropped.  `x` and `y` may be
    ## numeric vectors, or language objects / single character strings
    ## to be evaluated in `data`.
    resolve <- function(v) {
        ## evaluate a language/character argument in `data`; numeric
        ## input passes through unchanged
        if (is.language(v) || (length(v) == 1 && is.character(v))) {
            v <- eval(asOneSidedFormula(v)[[2]], data)
        }
        as.numeric(v)
    }
    xv <- resolve(x)
    yv <- resolve(y)
    ## average replicate responses at each distinct x value
    yMeans <- tapply(yv, xv, mean, na.rm = TRUE)
    xVals <- as.numeric(names(yMeans))
    ord <- order(xVals)
    out <- na.omit(data.frame(x = xVals[ord], y = as.vector(yMeans[ord])))
    class(out) <- c("sortedXyData", "data.frame")
    out
}

NLSstClosestX <-
    ## find the x value in the xy frame whose y value is closest to yval
    ## (generic; dispatches on `xy`)
    function(xy, yval) UseMethod("NLSstClosestX")

NLSstClosestX.sortedXyData <-
    ## find the x value in the xy frame whose y value is closest to yval
    ## uses linear interpolation in case the desired x falls between
    ## two data points in the xy frame
    function(xy, yval)
{
    deviations <- xy$y - yval
    if (any(deviations <= 0)) {
        ## largest non-positive deviation brackets yval from below
        dev1 <- max(deviations[deviations <= 0])
        lim1 <- xy$x[match(dev1, deviations)]
        if (all(deviations <= 0)) {
            return(lim1)
        }
        ## yval is attained exactly: no interpolation needed (and the
        ## formula below would evaluate 0/0, giving NaN)
        if (dev1 == 0) {
            return(lim1)
        }
    }
    if (any(deviations >= 0)) {
        ## smallest non-negative deviation brackets yval from above
        dev2 <- min(deviations[deviations >= 0])
        lim2 <- xy$x[match(dev2, deviations)]
        if (all(deviations >= 0)) {
            return(lim2)
        }
    }
    ## linear interpolation between the two bracketing x values
    dev1 <- abs(dev1)
    dev2 <- abs(dev2)
    lim1 + (lim2 - lim1) * dev1/(dev1 + dev2)
}

NLSstRtAsymptote <-
    ## Find a reasonable value for the right asymptote.
    ## (generic; dispatches on `xy`)
    function(xy) UseMethod("NLSstRtAsymptote")

NLSstRtAsymptote.sortedXyData <-
    function(xy)
{
    ## Decide whether the last (rightmost) response lies nearer the
    ## maximum or the minimum of the observed responses, then push the
    ## corresponding extreme outward by 1/8 of the response range to
    ## estimate the right-hand asymptote.
    yrange <- range(xy$y)
    pad <- diff(yrange)/8
    gaps <- abs(yrange - xy$y[nrow(xy)])
    if (which.min(gaps) == 2)
        yrange[2] + pad
    else
        yrange[1] - pad
}

NLSstLfAsymptote <-
    ## Find a reasonable value for the left asymptote.
    ## (generic; dispatches on `xy`)
    function(xy) UseMethod("NLSstLfAsymptote")

NLSstLfAsymptote.sortedXyData <-
    function(xy)
{
    ## Decide whether the first (leftmost) response lies nearer the
    ## maximum or the minimum of the observed responses, then push the
    ## corresponding extreme outward by 1/8 of the response range to
    ## estimate the left-hand asymptote.
    yrange <- range(xy$y)
    pad <- diff(yrange)/8
    gaps <- abs(yrange - xy$y[1])
    if (which.min(gaps) == 2)
        yrange[2] + pad
    else
        yrange[1] - pad
}

NLSstAsymptotic <-
    ## fit the asymptotic regression model in the form
    ## b0 + b1*exp(-exp(lrc) * x)
    ## (generic; dispatches on `xy`)
    function(xy) UseMethod("NLSstAsymptotic")

NLSstAsymptotic.sortedXyData <-
    function(xy)
{
    ## Fit y = b0 + b1*exp(-exp(lrc)*x) to a sortedXyData frame and
    ## return the named coefficient vector c(b0, b1, lrc).
    xy$rt <- NLSstRtAsymptote(xy)
    ## Initial estimate of log(rate constant) from a linear regression
    ## of log|y - asymptote| on x
    linFit <- lm(log(abs(y - rt)) ~ x, data = xy)
    lrc0 <- as.vector(log(-coef(linFit)[2]))
    ## Refine with the partially linear nls algorithm; coef order is
    ## (lrc, .lin1, .lin2), so reorder to (b0, b1, lrc)
    fit <- nls(y ~ cbind(1, 1 - exp(-exp(lrc) * x)),
               data = xy,
               start = list(lrc = lrc0),
               algorithm = "plinear")
    value <- coef(fit)[c(2, 3, 1)]
    names(value) <- c("b0", "b1", "lrc")
    value
}

### Local variables:
### mode: S
### End:

### $Id: zzModels.R,v 1.5 2001/05/10 02:50:31 bates Exp $
###
###       Individual selfStarting nonlinear regression models
###
### Copyright 1997, 1999 Jose C. Pinheiro <jcp$research.bell-labs.com>,
###                      Douglas M. Bates <bates$stat.wisc.edu>
###
### This file is part of the nls library for R and related languages
### and was taken from the nlme library for S.
### It is made available under the terms of the GNU General Public
### License, version 2, or at your option, any later version,
### incorporated herein by reference.
###
### This program is distributed in the hope that it will be
### useful, but WITHOUT ANY WARRANTY; without even the implied
### warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
### PURPOSE.  See the GNU General Public License for more
### details.
###
### You should have received a copy of the GNU General Public
### License along with this program; if not, write to the Free
### Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
### MA 02111-1307, USA

##*## SSasymp - asymptotic regression model

## Self-starting asymptotic regression model:
##   y = Asym + (R0 - Asym) * exp(-exp(lrc) * input)
## Asym: asymptote as input -> Inf; R0: response at input == 0;
## lrc: log of the rate constant (log scale keeps the rate positive).
SSasymp <- # selfStart(~ Asym + (R0 - Asym) * exp(-exp(lrc) * input),
    selfStart(function(input, Asym, R0, lrc)
          {
              ## model evaluation; .expr* follow deriv()-style naming
              .expr1 <- R0 - Asym
              .expr2 <- exp(lrc)
              .expr5 <- exp((( - .expr2) * input))
              .value <- Asym + (.expr1 * .expr5)
              ## attach the analytic gradient only when every parameter
              ## was supplied as a simple name
              .actualArgs <- as.list(match.call()[c("Asym", "R0", "lrc")])
              if(all(unlist(lapply(.actualArgs, is.name))))
              {
                  .grad <- array(0, c(length(.value), 3),
                                 list(NULL, c("Asym", "R0", "lrc")))
                  .grad[, "Asym"] <- 1 - .expr5
                  .grad[, "R0"] <- .expr5
                  .grad[, "lrc"] <-  -(.expr1*(.expr5*(.expr2*input)))
                  dimnames(.grad) <- list(NULL, .actualArgs)
                  attr(.value, "gradient") <- .grad
              }
              .value
          },
              function(mCall, data, LHS)
          {
              ## initial-value routine
              xy <- sortedXyData(mCall[["input"]], LHS, data)
              if (nrow(xy) < 3) {
                  stop("Too few distinct input values to fit a asymptotic regression model")
              }
              if(nrow(xy) > 3) {
                  ## more than 3 points: regress log|y - asymptote| on x
                  ## for a first estimate of the log rate constant
                  xy$ydiff <- abs(xy$y - NLSstRtAsymptote(xy))
                  xy <- data.frame(xy)
                  lrc <- log( - coef(lm(log(ydiff) ~ x, data = xy))[2])
                  names(lrc) <- NULL
                  ## This gives an estimate of the log (rate constant).  Use that
                  ## with a partially linear nls algorithm
                  pars <- coef(nls(y ~ cbind(1 - exp( - exp(lrc) * x),
                                             exp(- exp(lrc) * x)),
                                   data = xy,
                                   start = list(lrc = lrc),
                                   algorithm = "plinear"))
              }
              else {
                  ## exactly 3 distinct x values: solve in closed form,
                  ## which requires equal spacing and monotone responses
                  ydiff <- diff(xy$y)
                  if(prod(ydiff) <= 0) {
                      stop("Can't fit an asymptotic regression model to these data")
                  }
                  avg.resp <- xy$y
                  frac <- (avg.resp[3] - avg.resp[1])/(avg.resp[2] - avg.resp[1])
                  xunique <- unique(xy$x)
                  xdiff <- diff(xunique)
                  if(xdiff[1] == xdiff[2]) {	# equal spacing - can use a shortcut
                      expmRd <- frac - 1
                      rc <-  - log(expmRd)/xdiff[1]
                      lrc <- log(rc)
                      expmRx1 <- exp( - rc * xunique[1])
                      bma <- ydiff[1]/(expmRx1 * (expmRd - 1))
                      Asym <- avg.resp[1] - bma * expmRx1
                      pars <- c(lrc = lrc, Asym = Asym, R0 = bma + Asym)
                  }
                  else {
                      stop("Too few observations to fit an asymptotic regression model")
                  }
              }
              ## in both branches pars is ordered (lrc, Asym, R0);
              ## return in (Asym, R0, lrc) order named after the call
              names(pars) <- NULL
              val <- list(pars[2], pars[3], pars[1])
              names(val) <- mCall[c("Asym", "R0", "lrc")]
              val
          },
              parameters = c("Asym", "R0", "lrc"))

##*## SSasympOff - alternate formulation of asymptotic regression model
##*## with an offset

## Self-starting asymptotic regression with an offset on the input:
##   y = Asym * (1 - exp(-exp(lrc) * (input - c0)))
## Asym: asymptote; lrc: log rate constant; c0: input value at which
## the response is zero.
SSasympOff <- # selfStart(~ Asym *( 1 - exp(-exp(lrc) * (input - c0) ) ),
    selfStart(
              function(input, Asym, lrc, c0)
          {
              ## model evaluation with deriv()-style intermediate names
              .expr1 <- exp(lrc)
              .expr3 <- input - c0
              .expr5 <- exp((( - .expr1) * .expr3))
              .expr6 <- 1 - .expr5
              .value <- Asym * .expr6
              ## attach the analytic gradient when parameters are names
              .actualArgs <- as.list(match.call()[c("Asym", "lrc", "c0")])
              if(all(unlist(lapply(.actualArgs, is.name))))
              {
                  .grad <- array(0, c(length(.value), 3), list(NULL, c("Asym", "lrc", "c0")))
                  .grad[, "Asym"] <- .expr6
                  .grad[, "lrc"] <- Asym * (.expr5 * (.expr1 * .expr3))
                  .grad[, "c0"] <-  - (Asym * (.expr5 * .expr1))
                  dimnames(.grad) <- list(NULL, .actualArgs)
                  attr(.value, "gradient") <- .grad
              }
              .value
          },
              function(mCall, data, LHS)
          {
              ## initial values: estimate the log rate constant from a
              ## regression of log|y - asymptote| on x, refine with the
              ## partially linear algorithm, then solve for c0
              xy <- sortedXyData(mCall[["input"]], LHS, data)
              if (nrow(xy) < 4) {
                  stop("Too few distinct input values to fit the asympOff model")
              }
              xy$ydiff <- abs(xy$y - NLSstRtAsymptote(xy))
              xy <- data.frame(xy)
              lrc <- log( - coef(lm(log(ydiff) ~ x, data = xy))[2]) # log( rate constant)
              pars <- as.vector(coef(nls(y ~ cbind(1, exp(- exp(lrc) * x)),
                                         data = xy, algorithm = "plinear",
                                         start = list(lrc = lrc))))
              ## pars is (lrc, .lin1, .lin2); c0 is recovered from the
              ## fitted linear coefficients
              val <- list(pars[2], pars[1], exp(-pars[1]) * log(-pars[3]/pars[2]))
              names(val) <- mCall[c("Asym", "lrc", "c0")]
              val
          }, parameters = c("Asym", "lrc", "c0"))

##*## SSasympOrig - exponential curve through the origin to an asymptote

## Self-starting exponential rise through the origin to an asymptote:
##   y = Asym * (1 - exp(-exp(lrc) * input))
SSasympOrig <- # selfStart(~ Asym * (1 - exp(-exp(lrc) * input)),
    selfStart(
              function(input, Asym, lrc)
          {
              ## model evaluation with deriv()-style intermediate names
              .expr1 <- exp(lrc)
              .expr4 <- exp((( - .expr1) * input))
              .expr5 <- 1 - .expr4
              .value <- Asym * .expr5
              ## attach the analytic gradient when parameters are names
              .actualArgs <- as.list(match.call()[c("Asym", "lrc")])
              if(all(unlist(lapply(.actualArgs, is.name))))
              {
                  .grad <- array(0, c(length(.value), 2), list(NULL, c("Asym", "lrc")))
                  .grad[, "Asym"] <- .expr5
                  .grad[, "lrc"] <- Asym * (.expr4 * (.expr1 * input))
                  dimnames(.grad) <- list(NULL, .actualArgs)
                  attr(.value, "gradient") <- .grad
              }
              .value
          },
              function(mCall, data, LHS)
          {
              xy <- sortedXyData(mCall[["input"]], LHS, data)
              if (nrow(xy) < 3) {
                  stop("Too few distinct input values to fit the asympOrig model")
              }
              ## get a preliminary estimate for A
              A0 <- NLSstRtAsymptote(xy)
              ## get a least squares estimate for log of the rate constant
              lrc <- log(abs(mean(log(1 - xy$y/A0)/xy$x, na.rm = TRUE)))
              ## use the partially linear form to converge quickly
              xy <- data.frame(xy)
              pars <- as.vector(coef(nls(y ~ 1 - exp(-exp(lrc)*x),
                                         data = xy,
                                         start = list(lrc = lrc),
                                         algorithm = "plinear")))
              ## pars is (lrc, Asym); return in (Asym, lrc) order
              value <- c(pars[2], pars[1])
              names(value) <- mCall[c("Asym", "lrc")]
              value
          }, parameters = c("Asym", "lrc"))

##*## SSbiexp - linear combination of two exponentials

## Self-starting biexponential model:
##   y = A1 * exp(-exp(lrc1) * input) + A2 * exp(-exp(lrc2) * input)
## A1, A2: amplitudes; lrc1, lrc2: log rate constants of the two phases.
SSbiexp <-
                                        #  selfStart(~ A1 * exp(-exp(lrc1)*input) + A2 * exp(-exp(lrc2) * input),
    selfStart(
              function(input, A1, lrc1, A2, lrc2)
          {
              ## model evaluation with deriv()-style intermediate names
              .expr1 <- exp(lrc1)
              .expr4 <- exp((( - .expr1) * input))
              .expr6 <- exp(lrc2)
              .expr9 <- exp((( - .expr6) * input))
              .value <- (A1 * .expr4) + (A2 * .expr9)
              ## attach the analytic gradient when parameters are names
              .actualArgs <- as.list(match.call()[c("A1", "lrc1", "A2", "lrc2")])
              if(all(unlist(lapply(.actualArgs, is.name))))
              {
                  .grad <- array(0, c(length(.value), 4),
                                 list(NULL, c("A1", "lrc1", "A2", "lrc2")))
                  .grad[, "A1"] <- .expr4
                  .grad[, "lrc1"] <-  - (A1 * (.expr4 * (.expr1 * input)))
                  .grad[, "A2"] <- .expr9
                  .grad[, "lrc2"] <-  - (A2 * (.expr9 * (.expr6 * input)))
                  dimnames(.grad) <- list(NULL, .actualArgs)
                  attr(.value, "gradient") <- .grad
              }
              .value
          },
              function(mCall, data, LHS)
          {
              ## initial values by curve peeling: fit the second phase
              ## to the tail of the data, subtract it, fit the first
              ## phase to the remaining head, then refine both rate
              ## constants with a partially linear fit
              xy <- data.frame(sortedXyData(mCall[["input"]], LHS, data))
              if (nrow(xy) < 5) {
                  stop("Too few distinct input values to fit a biexponential")
              }
              ndistinct <- nrow(xy)
              nlast <- max(3, round(ndistinct/2))		# take at least half the data
              dlast <- xy[(ndistinct + 1 - nlast):ndistinct, ]
              pars2 <- coef(lm(log(y) ~ x, data = dlast))
              lrc2 <- log(abs(pars2[2]))		# log of the slope
              ## residuals after removing the tail exponential
              xy[["res"]] <- xy[["y"]] - exp(pars2[1]) * exp(-exp(lrc2)*xy[["x"]])
              dfirst <- xy[1:(ndistinct - nlast), ]
              pars1 <- coef(lm(log(abs(res)) ~ x, data = dfirst))
              lrc1 <- log(abs(pars1[2]))
              pars <- coef(nls(y ~ cbind(exp(-exp(lrc1)*x), exp(-exp(lrc2)*x)),
                               data = xy,
                               start = list(lrc1 = lrc1, lrc2 = lrc2),
                               algorithm = "plinear"))
              ## pars is (lrc1, lrc2, A1, A2); reorder to the call order
              value <- c(pars[3], pars[1], pars[4], pars[2])
              names(value) <- mCall[c("A1", "lrc1", "A2", "lrc2")]
              value
          }, parameters = c("A1", "lrc1", "A2", "lrc2"))

##*## SSfol - first order compartment model with the log of the rates
##*##         and the clearence
## Self-starting first-order compartment model:
##   y = Dose * exp(lKe + lKa - lCl) *
##       (exp(-exp(lKe)*input) - exp(-exp(lKa)*input)) / (exp(lKa) - exp(lKe))
## lKe, lKa, lCl: logs of the elimination rate constant, the absorption
## rate constant, and the clearance.
SSfol <-
                                        #  selfStart(~Dose * exp(lKe + lKa - lCl) * (exp(-exp(lKe) * input) -
                                        # exp(-exp(lKa) * input))/(exp(lKa) - exp(lKe)),
    selfStart(
              function(Dose, input, lKe, lKa, lCl)
          {
              ## model evaluation with deriv()-style intermediate names
              .expr4 <- Dose * (exp(((lKe + lKa) - lCl)))
              .expr5 <- exp(lKe)
              .expr8 <- exp((( - .expr5) * input))
              .expr9 <- exp(lKa)
              .expr12 <- exp((( - .expr9) * input))
              .expr14 <- .expr4 * (.expr8 - .expr12)
              .expr15 <- .expr9 - .expr5
              .expr16 <- .expr14/.expr15
              .expr23 <- .expr15^2
              .value <- .expr16
              ## attach the analytic gradient when parameters are names
              .actualArgs <- as.list(match.call()[c("lKe", "lKa", "lCl")])
              if(all(unlist(lapply(.actualArgs, is.name))))
              {
                  .grad <- array(0, c(length(.value), 3), list(NULL, c("lKe", "lKa", "lCl")))
                  .grad[, "lKe"] <- ((.expr14 - (.expr4 * (.expr8 * (.expr5 * input))))/
                                     .expr15) + ((.expr14 * .expr5)/.expr23)
                  .grad[, "lKa"] <- ((.expr14 + (.expr4 * (.expr12 * (.expr9 * input))))/
                                     .expr15) - ((.expr14 * .expr9)/.expr23)
                  .grad[, "lCl"] <-  - .expr16
                  dimnames(.grad) <- list(NULL, .actualArgs)
                  attr(.value, "gradient") <- .grad
              }
              .value
          },
              function(mCall, data, LHS)
          {
              ## initial-value routine
              data <- data.frame(data)
              resp <- eval(LHS, data)
              input <- eval(mCall[["input"]], data)
              Dose <- eval(mCall[["Dose"]], data)
              n <- length(resp)
              if(length(input) != n) {
                  stop(paste("must have length of response =",
                             "length of second argument to SSfol"))
              }
              if(n < 4) {
                  stop("must have at least 4 observations to fit an SSfol model")
              }
              ## index of the maximal response
              rmaxind <- order(resp)[n]

              lresp <- log(resp)
              if(rmaxind == n) {
                  ## peak at the last observation: no tail to estimate
                  ## the elimination rate from, fall back on a guess
                  lKe <- -2.5
              } else {
                  ## slope of log response over the tail after the peak
                  lKe <- log((lresp[rmaxind] - lresp[n])/(input[n] - input[rmaxind]))
              }
              ## first conditionally-linear fit with lKe held fixed
              cond.lin <- nls(resp ~ (exp(-input * exp(lKe))-exp(-input * exp(lKa))) * Dose,
                              data = list(resp = resp, input = input, Dose = Dose, lKe = lKe),
                              start = list(lKa = lKe + 1),
                              algorithm = "plinear")
              pars <- coef(cond.lin)
              names(pars) <- NULL
              ## second fit estimating lKa and lKe jointly
              cond.lin <- nls(resp ~ (Dose * (exp(-input*exp(lKe))-
                                              exp(-input*exp(lKa))))/(exp(lKa) - exp(lKe)),
                              data = data.frame(list(resp = resp, input = input,
                              Dose = Dose)),
                              start = list(lKa = pars[1],lKe = lKe),
                              algorithm = "plinear")
              pars <- coef(cond.lin)
              names(pars) <- NULL
              lKa <- pars[1]
              lKe <- pars[2]
              Ka <- exp(lKa)
              Ke <- exp(lKe)
              ## recover lCl from the fitted linear coefficient; names
              ## come from positions 4:6 (lKe, lKa, lCl) of the call
              value <- list(lKe, lKa, log((Ke * Ka)/(pars[3])))
              names(value) <- as.character(mCall)[4:6]
              value
          }, parameters = c("lKe", "lKa", "lCl"))

##*## SSfpl - four parameter logistic model

## Self-starting four-parameter logistic model:
##   y = A + (B - A)/(1 + exp((xmid - input)/scal))
## A: left asymptote; B: right asymptote; xmid: inflection point;
## scal: scale parameter on the input axis.
SSfpl <- # selfStart(~ A + (B - A)/(1 + exp((xmid - input)/scal)),
    selfStart(
              function(input, A, B, xmid, scal)
          {
              ## model evaluation with deriv()-style intermediate names
              .expr1 <- B - A
              .expr2 <- xmid - input
              .expr4 <- exp((.expr2/scal))
              .expr5 <- 1 + .expr4
              .expr8 <- 1/.expr5
              .expr13 <- .expr5^2
              .value <- A + (.expr1/.expr5)
              ## attach the analytic gradient when parameters are names
              .actualArgs <- as.list(match.call()[c("A", "B", "xmid", "scal")])
              if(all(unlist(lapply(.actualArgs, is.name))))
              {
                  .grad <- array(0, c(length(.value), 4),
                                 list(NULL, c("A", "B", "xmid", "scal")))
                  .grad[, "A"] <- 1 - .expr8
                  .grad[, "B"] <- .expr8
                  .grad[, "xmid"] <-  - ((.expr1 * (.expr4 * (1/ scal)))/.expr13)
                  .grad[, "scal"] <- (.expr1 * (.expr4 * (.expr2/(scal^2))))/.expr13
                  dimnames(.grad) <- list(NULL, .actualArgs)
                  attr(.value, "gradient") <- .grad
              }
              .value
          },
              function(mCall, data, LHS)
          {
              ## initial-value routine
              xy <- sortedXyData(mCall[["input"]], LHS, data)
              if (nrow(xy) < 5) {
                  stop("Too few distinct input values to fit a four-parameter logistic")
              }
              ## convert the response to a proportion (i.e. contained in (0,1))
              rng <- range(xy$y); drng <- diff(rng)
              xy$prop <- (xy$y - rng[1] + 0.05 * drng)/(1.1 * drng)
              ## inverse regression of the x values on the proportion
              ir <- as.vector(coef(lm(x ~ I(log(prop/(1-prop))), data = xy)))
              ## refine with a partially linear fit; coef order is
              ## (xmid, lscal, .lin1, .lin2)
              pars <- as.vector(coef(nls(y ~ cbind(1, 1/(1 + exp((xmid - x)/
                                                                 exp(lscal)))),
                                         data = xy,
                                         start = list(xmid = ir[1],
                                                      lscal = log(abs(ir[2]))),
                                         algorithm = "plinear")))
              value <- c(pars[3], pars[3] + pars[4], pars[1], exp(pars[2]))
              names(value) <- mCall[c("A", "B", "xmid", "scal")]
              value
          }, parameters = c("A", "B", "xmid", "scal"))

##*## SSlogis - logistic model for nonlinear regression

## Self-starting (three-parameter) logistic model:
##   y = Asym/(1 + exp((xmid - input)/scal))
## Asym: asymptote; xmid: inflection point; scal: scale on the input.
SSlogis <- # selfStart(~ Asym/(1 + exp((xmid - input)/scal)),
    selfStart(
              function(input, Asym, xmid, scal)
          {
              ## model evaluation with deriv()-style intermediate names
              .expr1 <- xmid - input
              .expr3 <- exp((.expr1/scal))
              .expr4 <- 1 + .expr3
              .expr10 <- .expr4^2
              .value <- Asym/.expr4
              ## attach the analytic gradient when parameters are names
              .actualArgs <- as.list(match.call()[c("Asym", "xmid", "scal")])
              if(all(unlist(lapply(.actualArgs, is.name))))
              {
                  .grad <- array(0, c(length(.value), 3), list(NULL, c("Asym", "xmid", "scal")))
                  .grad[, "Asym"] <- 1/.expr4
                  .grad[, "xmid"] <-  - ((Asym * (.expr3 * (1/scal)))/.expr10)
                  .grad[, "scal"] <- (Asym * (.expr3 * (.expr1/(scal^2))))/.expr10
                  dimnames(.grad) <- list(NULL, .actualArgs)
                  attr(.value, "gradient") <- .grad
              }
              .value
          },
              function(mCall, data, LHS)
          {
              ## initial-value routine
              xy <- data.frame(sortedXyData(mCall[["input"]], LHS, data))
              if(nrow(xy) < 4) {
                  stop("Too few distinct input values to fit a logistic")
              }
              z <- xy[["y"]]
              if (min(z) <= 0) { z <- z - 1.05 * min(z) } # avoid zeroes
              z <- z/(1.05 * max(z))		# scale to within unit height
              xy[["z"]] <- log(z/(1 - z))		# logit transformation
              ## inverse regression of x on the logit gives starting
              ## values for xmid and scal
              aux <- coef(lm(x ~ z, xy))
              ## partially linear fit; coef order is (xmid, scal, Asym)
              pars <- as.vector(coef(nls(y ~ 1/(1 + exp((xmid - x)/scal)),
                                         data = xy,
                                         start = list(xmid = aux[1], scal = aux[2]),
                                         algorithm = "plinear")))
              value <- c(pars[3], pars[1], pars[2])
              names(value) <- mCall[c("Asym", "xmid", "scal")]
              value
          }, parameters = c("Asym", "xmid", "scal"))

##*## SSmicmen - Michaelis-Menten model for enzyme kinetics.

## Self-starting Michaelis-Menten model for enzyme kinetics:
##   y = Vm * input/(K + input)
## Vm: maximum velocity; K: Michaelis constant (input at half of Vm).
SSmicmen <- # selfStart(~ Vm * input/(K + input),
    selfStart(
              function(input, Vm, K)
          {
              ## model evaluation with deriv()-style intermediate names
              .expr1 <- Vm * input
              .expr2 <- K + input
              .value <- .expr1/.expr2
              ## attach the analytic gradient when parameters are names
              .actualArgs <- as.list(match.call()[c("Vm", "K")])
              if(all(unlist(lapply(.actualArgs, is.name))))
              {
                  .grad <- array(0, c(length(.value), 2), list(NULL, c("Vm", "K")))
                  .grad[, "Vm"] <- input/.expr2
                  .grad[, "K"] <-  - (.expr1/(.expr2^2))
                  dimnames(.grad) <- list(NULL, .actualArgs)
                  attr(.value, "gradient") <- .grad
              }
              .value
          },
              function(mCall, data, LHS)
          {
              ## initial-value routine
              xy <- data.frame(sortedXyData(mCall[["input"]], LHS, data))
              if (nrow(xy) < 3) {
                  stop("Too few distinct input values to fit a Michaelis-Menten")
              }
              ## take the inverse transformation
              pars <- as.vector(coef(lm(1/y ~ I(1/x), data = xy)))
              ## use the partially linear form to converge quickly;
              ## coef order is (K, Vm)
              pars <- as.vector(coef(nls(y ~ x/(K + x),
                                         data = xy,
                                         start = list(K = abs(pars[2]/pars[1])),
                                         algorithm = "plinear")))
              value <- c(pars[2], pars[1])
              names(value) <- mCall[c("Vm", "K")]
              value
          }, parameters = c("Vm", "K"))

## Self-starting Gompertz growth model:
##   y = Asym * exp(-b2 * b3^x)
## Asym: asymptote; b2, b3: shape parameters (0 < b3 < 1 for growth).
SSgompertz <- #    selfStart( ~ Asym * exp(-b2*b3^x),
    ## Gompertz model for growth curve data
    selfStart(function(x, Asym, b2, b3)
          {
              ## model evaluation with deriv()-style intermediate names
              .expr2 <- b3^x
              .expr4 <- exp(-b2 * .expr2)
              .value <- Asym * .expr4
              ## attach the analytic gradient when parameters are names
              .actualArgs <- as.list(match.call()[c("Asym", "b2", "b3")])
              if(all(unlist(lapply(.actualArgs, is.name))))
              {
                  .grad <- array(0, c(length(.value), 3),
                                 list(NULL, c("Asym", "b2", "b3")))
                  .grad[, "Asym"] <- .expr4
                  .grad[, "b2"] <- -Asym * (.expr4 * .expr2)
                  .grad[, "b3"] <- -Asym * (.expr4 * (b2 * (b3^(x - 1) * x)))
                  dimnames(.grad) <- list(NULL, .actualArgs)
                  attr(.value, "gradient") <- .grad
              }
              .value
          },
              function(mCall, data, LHS)
          {
              ## initial-value routine
              xy <- sortedXyData(mCall[["x"]], LHS, data)
              if (nrow(xy) < 4) {
                  stop("Too few distinct input values to fit the Gompertz model")
              }
              ## fit an asymptotic regression to log|y| for starting
              ## values of b2 and b3
              xyL <- xy
              xyL$y <- log(abs(xyL$y))
              pars <- NLSstAsymptotic(xyL)
              ## refine with a partially linear fit; coef order is
              ## (b2, b3, Asym)
              pars <- coef(nls(y ~ exp(-b2*b3^x),
                               data = xy,
                               alg = "plinear",
                               start = c(b2 = pars[["b1"]],
                               b3 = exp(-exp(pars[["lrc"]])))))
              val <- pars[c(3,1,2)]
              names(val) <- mCall[c("Asym", "b2", "b3")]
              val
          },
              c("Asym", "b2", "b3"))

SSweibull <- # selfStart( ~ Asym - Drop * exp(-exp(lrc)*x^pwr),
    ## Weibull model for growth curve data:
    ## value = Asym - Drop * exp(-exp(lrc) * x^pwr)
    selfStart( function(x, Asym, Drop, lrc, pwr)
          {
              ## model value and reusable sub-expressions
              .expr1 <- exp(lrc)
              .expr3 <- x^pwr
              .expr5 <- exp(-.expr1 * .expr3)
              .value <- Asym - Drop * .expr5
              .actualArgs <- as.list(match.call()[c("Asym", "Drop", "lrc", "pwr")])
              ## attach the analytic gradient only when all parameters
              ## were supplied as simple names (the usual nls() case)
              if(all(unlist(lapply(.actualArgs, is.name))))
              {
                  .grad <- array(0, c(length(.value), 4),
                                 list(NULL, c("Asym", "Drop", "lrc", "pwr")))
                  .grad[, "Asym"] <- 1
                  .grad[, "Drop"] <- -.expr5
                  .grad[, "lrc"] <- Drop * (.expr5 * (.expr1 * .expr3))
                  .grad[, "pwr"] <- Drop * (.expr5 * (.expr1 * (.expr3 * log(x))))
                  dimnames(.grad) <- list(NULL, .actualArgs)
                  attr(.value, "gradient") <- .grad
              }
              .value
          },
              function(mCall, data, LHS)
          {
              xy <- sortedXyData(mCall[["x"]], LHS, data)
              if (nrow(xy) < 5) {
                  stop("Too few distinct input values to fit the Weibull growth model")
              }
              if (any(xy[["x"]] < 0)) {
                  ## was error(), which is not a base R function and
                  ## would itself fail with "could not find function"
                  stop("All x values must be non-negative to fit the Weibull growth model")
              }
              Rasym <- NLSstRtAsymptote(xy)
              Lasym <- NLSstLfAsymptote(xy)
              ## linearization: log(-log((Rasym - y)/(Rasym - Lasym)))
              ## = lrc + pwr * log(x), giving rough starting values
              pars <- coef(lm(log(-log((Rasym - y)/(Rasym - Lasym))) ~ log(x),
                             data = xy, subset = x > 0))
              ## refine with the partially linear algorithm; Asym and
              ## Drop enter linearly via the cbind() model matrix and
              ## come back as the last two coefficients -> reorder
              val <- coef(nls(y ~ cbind(1, -exp(-exp(lrc)*x^pwr)),
                               data = xy,
                               algorithm = "plinear",
                               start = c(lrc = pars[[1]], pwr = pars[[2]])))[
                                                         c(3,4,1,2)]
              names(val) <- mCall[c("Asym", "Drop", "lrc", "pwr")]
              val
          },
              c("Asym", "Drop", "lrc", "pwr"))


### Local variables:
### mode: S
### End:
