Here is a C++ function that computes the gradient of the potential at a point c, based on the formulas given by Linas Vepstas. The gradient is returned by reference through gzx and gzy. The escape length and derivative values are also returned by reference so that it works with my other function below.

void getGradient(long double cx, long double cy, long double &xDer,
                 long double &yDer, long double &gzx, long double &gzy,
                 long double &length, unsigned int maxIter) {
    // Computes the gradient of the Douady-Hubbard potential of the Mandelbrot
    // set at c = (cx, cy), based on the gradient equation found at:
    //   http://linas.org/art-gallery/escape/ray.html
    //
    // Outputs (all by reference):
    //   xDer, yDer - real/imag parts of the derivative dz_n/dc at escape
    //   gzx,  gzy  - x/y components of the potential gradient at (cx, cy)
    //   length     - |z_n| when the orbit escaped (or after maxIter steps)
    //
    // NOTE(review): assumes (cx, cy) lies OUTSIDE the Mandelbrot set; if the
    // orbit never escapes, log(log(length)) below is meaningless (NaN).

    long double zx = 0.0L;
    long double zy = 0.0L;
    xDer = 0.0L;
    yDer = 0.0L; // initialize derivative accumulators
    long double xTemp;
    unsigned int n = 0;

    // Bailout radius squared: iterate until |z|^2 exceeds the largest float.
    // A huge bailout makes the continuous (log log) terms below accurate.
    const long double huge = std::numeric_limits<float>::max();

    while ((zx*zx + zy*zy) < huge && n < maxIter) {
        // Derivative recurrence (complex): d_{n+1} = 2 * z_n * d_n + 1.
        xTemp = 2.0L*(zx*xDer - zy*yDer) + 1.0L;
        yDer = 2.0L*(zx*yDer + zy*xDer);
        xDer = xTemp;
        // Orbit recurrence (complex): z_{n+1} = z_n^2 + c.
        xTemp = zx*zx - zy*zy + cx;
        zy = 2.0L*zx*zy + cy;
        zx = xTemp;
        n++;
    }

    length = std::sqrt(zx*zx + zy*zy);

    // Continuous iteration count and Douady-Hubbard potential f = 2^(-m).
    long double m = n + 1 - (std::log(std::log(length)) / std::log(2.0L));
    // std::pow (not unqualified pow) guarantees the long double overload;
    // the C pow(double,double) would silently truncate precision.
    long double f = std::pow(2.0L, -m);

    // (Dx, Dy) = z_n * conj(d_n) / 2 — note the CONJUGATE of the derivative
    // (the negated yDer terms below), not z_n * d_n itself.
    long double Dx = zx*(xDer/2.0L) - zy*(-yDer/2.0L); // real part
    long double Dy = zx*(-yDer/2.0L) + zy*(xDer/2.0L); // imaginary part

    // Gradient of the potential with respect to (cx, cy).
    gzx = f*Dx / (length*length*std::log(length));
    gzy = f*Dy / (length*length*std::log(length));
}

And here is a function that uses the getGradient function to perform Runge-Kutta integration and find the external angle. It is assumed that the point has already been checked and found to be outside the set.

double externalAngle(long double cx, long double cy, std::size_t maxIter) {
    // Traces the external ray from (cx, cy) outward along the gradient of the
    // Douady-Hubbard potential using classic 4th-order Runge-Kutta steps, then
    // returns atan2 of the far-away endpoint as the external angle (radians).
    //
    // PRECONDITION: (cx, cy) must already be known to lie outside the
    // Mandelbrot set (the potential gradient is undefined inside).

    // Stop once this far from the origin; the angle of a distant point
    // approximates the true external angle (bigger is better, but slower).
    const long double huge = 640000.0L;

    long double ckx = cx;
    long double cky = cy;
    long double ckxTemp, ckyTemp;
    long double xDer, yDer, length, derLength, dist;
    long double gzx, gzy, gzxA, gzyA, gzxB, gzyB, gzxC, gzyC, gzxD, gzyD;
    long double gzlength;

    unsigned int k = 0;
    unsigned int maxStep = static_cast<unsigned int>(maxIter) * 2; // make sure we try to walk far away
    // getGradient takes unsigned int; cast once instead of narrowing per call.
    unsigned int iter = static_cast<unsigned int>(maxIter);

    while ((ckx*ckx + cky*cky) < huge && k++ < maxStep) {
        // --- RK4 stage A: gradient at the current point --------------------
        getGradient(ckx, cky, xDer, yDer, gzxA, gzyA, length, iter);

        // Step size from the distance-estimate quantity |z| log|z| / |dz/dc|.
        derLength = std::sqrt(xDer*xDer + yDer*yDer);
        dist = 0.5L * std::log(length) * length / derLength;
        dist = std::min(16.0L, 0.5L*dist); // halve and clamp the step size

        gzlength = std::sqrt(gzxA*gzxA + gzyA*gzyA);
        if (gzlength == 0.0L) break;                   // guard: avoid 0/0 -> NaN
        ckxTemp = ckx + (0.5L * dist * gzxA/gzlength); // walk to midpoint
        ckyTemp = cky + (0.5L * dist * gzyA/gzlength); // using A

        // --- stage B: gradient at the A-midpoint ---------------------------
        getGradient(ckxTemp, ckyTemp, xDer, yDer, gzxB, gzyB, length, iter);
        gzlength = std::sqrt(gzxB*gzxB + gzyB*gzyB);
        if (gzlength == 0.0L) break;
        ckxTemp = ckx + (0.5L * dist * gzxB/gzlength); // walk to midpoint
        ckyTemp = cky + (0.5L * dist * gzyB/gzlength); // using B

        // --- stage C: gradient at the B-midpoint ---------------------------
        getGradient(ckxTemp, ckyTemp, xDer, yDer, gzxC, gzyC, length, iter);
        gzlength = std::sqrt(gzxC*gzxC + gzyC*gzyC);
        if (gzlength == 0.0L) break;
        ckxTemp = ckx + (dist * gzxC/gzlength); // walk full step
        ckyTemp = cky + (dist * gzyC/gzlength); // using C

        // --- stage D: gradient at the full-step point ----------------------
        getGradient(ckxTemp, ckyTemp, xDer, yDer, gzxD, gzyD, length, iter);

        // Standard RK4 weighted average of the four gradient samples.
        gzx = (1.0L/6.0L) * (gzxA + 2.0L*(gzxB + gzxC) + gzxD);
        gzy = (1.0L/6.0L) * (gzyA + 2.0L*(gzyB + gzyC) + gzyD);

        gzlength = std::sqrt(gzx*gzx + gzy*gzy); // length to normalize
        if (gzlength == 0.0L) break;
        ckx += dist * gzx/gzlength; // normalized gradient scaled by dist
        cky += dist * gzy/gzlength;
    }

    return std::atan2(cky, ckx); // angle of the far endpoint = external angle
}

Even though the Runge-Kutta method requires evaluating the gradient 4 times per step, it winds up being much faster than Euler integration in practice because it is so much more accurate. Notice that I used

dist = std::min(16.0L,0.5*dist);

for the step size. To get an image just as smooth with plain Euler integration, I had to reduce the step size to

dist = std::min(1.0L,0.05*dist);

which was way too slow.