Saturday, April 26, 2008

logistic regression on a circle

> x <- rnorm(1000)
> y <- rnorm(1000)
> inside <- ifelse(x^2 + y^2 < 1, 1, 0)
> f <- glm(inside ~ I(y^2)+I(x^2)+x*y, family=binomial())
Warning messages:
1: In glm.fit(x = X, y = Y, weights = weights, start = start, etastart = etastart, :
algorithm did not converge
2: In glm.fit(x = X, y = Y, weights = weights, start = start, etastart = etastart, :
fitted probabilities numerically 0 or 1 occurred
> d <- expand.grid(x=seq(-1,1,len=100), y=seq(-1,1,len=100))
> p <- predict(f, type="response", newdata=d)
> image(matrix(p, ncol=100))


This seems to work pretty well, as one might expect. The warnings are harmless here: the two classes are perfectly separated by a quadratic boundary, so the coefficients grow without bound and the fitted probabilities saturate at 0 and 1, but the recovered boundary is still the right circle.
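
As a quick sanity check (a sketch, assuming the grid d and predictions p from the session above are still in the workspace), we can compare the model's 0.5 threshold against the true unit circle:

truth <- ifelse(d$x^2 + d$y^2 < 1, 1, 0)            # true labels on the prediction grid
mean((p > 0.5) == truth)                            # fraction of grid points classified correctly
contour(matrix(p, ncol=100), levels=0.5, add=TRUE)  # overlay the fitted boundary on the image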

Now let's apply this to the Mandelbrot Set. In addition to each point's own coordinates, the model also gets the coordinates of the point after one iteration of z^2 + c as predictors:
mset.data <- function(width=200, iterations=100) {
  ## Grid of candidate points c covering [-2,2] x [-2,2] in the complex plane.
  mset <- as.vector(outer(seq(-2, 2, len=width),
                          seq(-2, 2, len=width) * 1i, FUN="+"))
  cmset <- NULL                 # points known to escape (outside the M-Set)
  z <- numeric(length(mset))
  for (i in 1:iterations) {
    z <- z^2 + mset             # one Mandelbrot iteration for the surviving points
    outside <- Mod(z) > 2       # once |z| > 2 the orbit is guaranteed to escape
    cmset <- append(cmset, mset[outside])
    inside <- !outside
    mset <- mset[inside]
    z <- z[inside]
  }
  ## Points still bounded after 'iterations' steps are labelled as inside the set.
  all <- c(cmset, mset)
  all.next <- all + all^2       # each point after one iteration of z^2 + c from z = c
  inside <- rep(c(0, 1), c(length(cmset), length(mset)))
  data.frame(inside=inside, x1=Re(all), y1=Im(all),
             x2=Re(all.next), y2=Im(all.next))
}
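
Before fitting, it may help to peek at what mset.data() returns (a quick sketch, not from the original post):

md <- mset.data(width=100)    # smaller grid, just for a quick look
table(md$inside)              # how many escaped (0) vs. bounded (1) points
summary(md[, c("x1", "y1")])  # coordinates cover the [-2,2] x [-2,2] grid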

mset.prob <- function(width=200) {
  d <- mset.data(width=width)
  ## Quadratic logistic model in the point (x1, y1) and its image after
  ## one iteration of z^2 + c, namely (x2, y2).
  f <- glm(inside ~ I(x1^2) + I(y1^2) + x1*y1 +
             I(x2^2) + I(y2^2) + x2*y2,
           data=d, family=binomial())
  ## Predict the probability of membership on a regular grid over the same region.
  z1 <- as.vector(outer(seq(-2, 2, len=width),
                        seq(-2, 2, len=width) * 1i, FUN="+"))
  z2 <- z1 + z1^2
  p <- predict(f, type="response",
               newdata=data.frame(x1=Re(z1), y1=Im(z1),
                                  x2=Re(z2), y2=Im(z2)))
  matrix(p, nrow=width, ncol=width)
}

> p <- mset.prob()
> image(seq(-2,2,len=200), seq(-2,2,len=200), p, xlab="x", ylab="y")
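
To see where the model puts its decision boundary (a sketch, assuming p from the call above), the 0.5 contour can be drawn on top of the same image:

contour(seq(-2, 2, len=200), seq(-2, 2, len=200), p,
        levels=0.5, add=TRUE, drawlabels=FALSE)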


How are we doing at actually computing the M-Set?
> md <- mset.data()
> o <- md$inside==1
> plot(c(), xlim=c(-2,2), ylim=c(-2,2), type="n")
> points(md$x1[o], md$y1[o], pch=1)

Looks about right.
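
For a rough quantitative check (a sketch, not part of the original post), one can refit the same model and look at its misclassification rate on the training grid:

d <- mset.data()
f <- glm(inside ~ I(x1^2) + I(y1^2) + x1*y1 +
           I(x2^2) + I(y2^2) + x2*y2,
         data=d, family=binomial())
mean((fitted(f) > 0.5) != d$inside)   # fraction of grid points the model gets wrong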
