getclf<-function(data, freq)
  {
    # Takes data sorted so rows with identical missingness patterns are
    # contiguous (as produced by mysort) and returns the negative
    # log-likelihood as a function of the parameter vector, suitable for nlm().
    #
    # data: numeric matrix, rows grouped by missingness pattern.
    # freq: integer vector giving the number of rows in each pattern group.
    #
    # The returned closure takes `pars`, the mean vector followed by the
    # nvars*(nvars+3)/2 - nvars parameters of the del matrix (see make.del).
    nvars<-ncol(data)
    # One representative row per pattern; presabs marks observed (1) vs NA (0).
    testdata<-data[cumsum(freq),]
    presabs<-ifelse(is.na(testdata),0,1)

    data<-t(data)   # convert data to vectors that can be passed to C
    presabs<-t(presabs)
    dim(presabs)<-NULL
    dim(data)<-NULL
    data<-data[!is.na(data)]   # the C routine receives only observed values

#    if (!is.loaded(symbol.C("evallf"))) {
#        cat("loading object code...\n")
#        dyn.load("st771/libs/st771.so")
#    }

    function(pars){
      .C("evallf",as.double(data),as.integer(nvars),as.integer(freq),
         as.integer(length(freq)),as.integer(presabs),as.double(pars),val=double(1))$val;
    }
  }
getstartvals<-function(x,eps=1e-03)
  {
    # Returns starting values for nlm(): the column means followed by the
    # parameters of the relative precision matrix del (log of its diagonal,
    # then the above-diagonal elements column by column).
    #
    # x:   numeric data matrix, possibly containing NAs.
    # eps: floor (relative to the smallest positive eigenvalue) used to force
    #      the sample covariance matrix to be positive definite.
    n<-ncol(x)
    startvals<-double(n+n*(n+1)/2)
    startvals[1:n]<-apply(x,2,mean,na.rm=TRUE)

    sampmat<-cov(x,use="p") # sample var-cov matrix (pairwise-complete obs)
    eig<-eigen(sampmat,symmetric=TRUE)
    realvals<-sapply(eig$values, function(y) ifelse(is.complex(y),0,y))
    # Clamp non-positive eigenvalues so the reconstructed matrix is PD and
    # its Cholesky factorization exists.
    smalleval<-eps*min(realvals[realvals>0])
    posvals<-pmax(smalleval,realvals)
    mypdmat<-eig$vectors %*% diag(posvals) %*% t(eig$vectors)
    myfact<-chol(mypdmat)
    mydel<-solve(myfact,diag(n))
    signchange<-diag(ifelse(diag(mydel)>0,1,-1))
    mydel<-mydel %*% signchange # ensure that diagonal elts are positive
    startvals[(n+1):(2*n)]<-log(diag(mydel))
    if(n>1){   # 2:n would count down (2:1) and break for n==1
      for(i in 2:n){
        startvals[(2*n+sum(1:(i-1))-i+2):(2*n+sum(1:(i-1)))]<-mydel[1:(i-1),i]
      }
    }
    startvals
  }
make.del<-function(pars)
  {
    # Takes a parameter vector with the log of the diagonal elements first,
    # and then the elements above the diagonal in column order,
    # and fills in the zeros to make the upper-triangular matrix del.
    #
    # pars has length k*(k+1)/2; recover the dimension k from it.
    k<-floor((-1+sqrt(1+8*length(pars)))/2)
    # nrow = k guards the k == 1 case: diag(scalar) would otherwise build an
    # identity matrix of size exp(pars[1]) instead of a 1 x 1 matrix.
    mymatrix<-diag(exp(pars[1:k]),nrow=k)
    pars<-pars[-(1:k)]
    if (k>1){
      for(i in 2:k){
        mymatrix[1:(i-1),i]<-pars[1:(i-1)]
        pars<-pars[-(1:(i-1))]
      }
    }
    mymatrix
  }

mlest<-function(data,...)
  {
    # Maximum likelihood estimation for multivariate normal data containing
    # missing values: returns the MLE of the mean vector and the var-cov
    # matrix, plus the nlm() convergence diagnostics. Extra arguments in
    # ... are passed straight through to nlm().
    data<-as.matrix(data)
    # Group rows sharing a missingness pattern so the likelihood can be
    # evaluated pattern by pattern.
    sortlist<-mysort(data)

    nvars<-ncol(data)
    nobs<-nrow(data)
    if(nvars>50)
      stop("mlest cannot handle more than 50 variables.")

    startvals<-getstartvals(data) # starting point for the optimizer

    objective<-getclf(data=sortlist$sorted.data, freq=sortlist$freq)
    mle<-nlm(objective,startvals,...)

    # First nvars estimates are the mean; the rest parameterize del,
    # whose inverse is a Cholesky-type factor of sigmahat.
    muhat<-mle$estimate[1:nvars]
    del<-make.del(mle$estimate[-(1:nvars)])
    invdel<-solve(del,diag(nvars))
    sigmahat<-t(invdel) %*% invdel
    list(muhat=muhat, sigmahat=sigmahat, value=mle$minimum,
         gradient=mle$gradient, stop.code=mle$code,
         iterations=mle$iterations)
  }

mysort<-function(x)
  {
    # Sorts the rows of the incoming data matrix x so that rows with the
    # same pattern of missingness are adjacent, in an order for which it is
    # easier to write the likelihood function. Returns the sorted data and
    # the frequency of each missingness pattern.
    nvars<-ncol(x)
    # Keep the powers of two as doubles: as.integer(2^k) overflows to NA
    # (with a warning) for nvars > 31, and mlest() allows up to 50 variables.
    powers<-2^((nvars-1):0)
    # Encode each row's missingness pattern as a single number.
    binrep<-ifelse(is.na(x),0,1)
    decrep<-binrep %*% powers
    # drop = FALSE keeps a one-column input a matrix after row reordering.
    sorted<-x[order(decrep), ,drop=FALSE]
    decrep<-decrep[order(decrep)]

    list(sorted.data=sorted, freq=as.vector(table(decrep)))
  }
# Load hook invoked by library() for packages without a namespace: loads the
# package's compiled code (the "evallf" routine called via .C in getclf).
# NOTE(review): .First.lib is deprecated for packages with a NAMESPACE file;
# modern packages use useDynLib() in NAMESPACE (or .onLoad) instead.
.First.lib <- function (lib, pkg)
  library.dynam("mvnmle",pkg,lib)
