diff options
Diffstat (limited to 'ksvg/impl/libs/art_support/art_rgba_svp.c')
-rw-r--r-- | ksvg/impl/libs/art_support/art_rgba_svp.c | 58 |
1 file changed, 29 insertions, 29 deletions
diff --git a/ksvg/impl/libs/art_support/art_rgba_svp.c b/ksvg/impl/libs/art_support/art_rgba_svp.c index 47c7d924..ea2c1476 100644 --- a/ksvg/impl/libs/art_support/art_rgba_svp.c +++ b/ksvg/impl/libs/art_support/art_rgba_svp.c @@ -37,7 +37,7 @@ struct _ArtKSVGRgbaSVPAlphaData { art_u8 r, g, b, alpha; art_u32 rgba; art_u8 *buf; - art_u8 *mask; + art_u8 *tqmask; int rowstride; int x0, x1; int y0; @@ -124,7 +124,7 @@ art_ksvg_rgba_run_alpha (art_u8 *buf, art_u8 r, art_u8 g, art_u8 b, int alpha, i } static void -art_ksvg_rgba_mask_run_alpha (art_u8 *buf, art_u8 *mask, art_u8 r, art_u8 g, art_u8 b, int alpha, int n) +art_ksvg_rgba_tqmask_run_alpha (art_u8 *buf, art_u8 *tqmask, art_u8 r, art_u8 g, art_u8 b, int alpha, int n) { int i; int v; @@ -136,7 +136,7 @@ art_ksvg_rgba_mask_run_alpha (art_u8 *buf, art_u8 *mask, art_u8 r, art_u8 g, art for(i = 0; i < n; i++) { - am = (alpha * *mask++) + 0x80; + am = (alpha * *tqmask++) + 0x80; am = (am + (am >> 8)) >> 8; v = *buf; @@ -330,7 +330,7 @@ art_ksvg_rgba_svp_alpha_opaque_callback(void *callback_data, int y, } static void -art_ksvg_rgba_svp_alpha_mask_callback(void *callback_data, int y, +art_ksvg_rgba_svp_alpha_tqmask_callback(void *callback_data, int y, int start, ArtSVPRenderAAStep *steps, int n_steps) { ArtKSVGRgbaSVPAlphaData *data = (ArtKSVGRgbaSVPAlphaData *)callback_data; @@ -342,7 +342,7 @@ art_ksvg_rgba_svp_alpha_mask_callback(void *callback_data, int y, art_u8 r, g, b; int *alphatab; int alpha; - art_u8 *maskbuf; + art_u8 *tqmaskbuf; linebuf = data->buf; x0 = data->x0; @@ -353,7 +353,7 @@ art_ksvg_rgba_svp_alpha_mask_callback(void *callback_data, int y, b = data->b; alphatab = data->alphatab; - maskbuf = data->mask + (y - data->y0) * (data->x1 - data->x0); + tqmaskbuf = data->tqmask + (y - data->y0) * (data->x1 - data->x0); if(n_steps > 0) { @@ -362,7 +362,7 @@ art_ksvg_rgba_svp_alpha_mask_callback(void *callback_data, int y, { alpha = (running_sum >> 16) & 0xff; if(alpha) - art_ksvg_rgba_mask_run_alpha (linebuf, 
maskbuf, + art_ksvg_rgba_tqmask_run_alpha (linebuf, tqmaskbuf, r, g, b, alphatab[alpha], run_x1 - x0); } @@ -376,7 +376,7 @@ art_ksvg_rgba_svp_alpha_mask_callback(void *callback_data, int y, { alpha = (running_sum >> 16) & 0xff; if(alpha) - art_ksvg_rgba_mask_run_alpha (linebuf + (run_x0 - x0) * 4, maskbuf + (run_x0 - x0), + art_ksvg_rgba_tqmask_run_alpha (linebuf + (run_x0 - x0) * 4, tqmaskbuf + (run_x0 - x0), r, g, b, alphatab[alpha], run_x1 - run_x0); } @@ -386,7 +386,7 @@ art_ksvg_rgba_svp_alpha_mask_callback(void *callback_data, int y, { alpha = (running_sum >> 16) & 0xff; if(alpha) - art_ksvg_rgba_mask_run_alpha (linebuf + (run_x1 - x0) * 4, maskbuf + (run_x1 - x0) , + art_ksvg_rgba_tqmask_run_alpha (linebuf + (run_x1 - x0) * 4, tqmaskbuf + (run_x1 - x0) , r, g, b, alphatab[alpha], x1 - run_x1); } @@ -395,7 +395,7 @@ art_ksvg_rgba_svp_alpha_mask_callback(void *callback_data, int y, { alpha = (running_sum >> 16) & 0xff; if(alpha) - art_ksvg_rgba_mask_run_alpha (linebuf, maskbuf, + art_ksvg_rgba_tqmask_run_alpha (linebuf, tqmaskbuf, r, g, b, alphatab[alpha], x1 - x0); } @@ -415,7 +415,7 @@ art_ksvg_rgba_svp_alpha_mask_callback(void *callback_data, int y, * @rowstride: Rowstride of @buf buffer. * @alphagamma: #ArtAlphaGamma for gamma-correcting the compositing. * - * Renders the shape specified with @svp over the @buf RGB buffer. + * Renders the tqshape specified with @svp over the @buf RGB buffer. * @x1 - @x0 specifies the width, and @y1 - @y0 specifies the height, * of the rectangle rendered. The new pixels are stored starting at * the first byte of @buf. 
Thus, the @x0 and @y0 parameters specify @@ -436,7 +436,7 @@ art_ksvg_rgba_svp_alpha(const ArtSVP *svp, art_u32 rgba, art_u8 *buf, int rowstride, ArtAlphaGamma *alphagamma, - art_u8 *mask) + art_u8 *tqmask) { ArtKSVGRgbaSVPAlphaData data; int r, g, b; @@ -454,7 +454,7 @@ art_ksvg_rgba_svp_alpha(const ArtSVP *svp, data.b = b; data.alpha = alpha; data.rgba = rgba; - data.mask = mask; + data.tqmask = tqmask; a = 0x8000; da = (alpha * 66051 + 0x80) >> 8; /* 66051 equals 2 ^ 32 / (255 * 255) */ @@ -471,8 +471,8 @@ art_ksvg_rgba_svp_alpha(const ArtSVP *svp, data.x1 = x1; data.y0 = y0; - if(mask) - art_svp_render_aa (svp, x0, y0, x1, y1, art_ksvg_rgba_svp_alpha_mask_callback, &data); + if(tqmask) + art_svp_render_aa (svp, x0, y0, x1, y1, art_ksvg_rgba_svp_alpha_tqmask_callback, &data); else { if (alpha == 255) @@ -485,7 +485,7 @@ art_ksvg_rgba_svp_alpha(const ArtSVP *svp, /* RGB renderers */ static void -art_ksvg_rgb_mask_run_alpha(art_u8 *buf, art_u8 *mask, art_u8 r, art_u8 g, art_u8 b, int alpha, int n) +art_ksvg_rgb_tqmask_run_alpha(art_u8 *buf, art_u8 *tqmask, art_u8 r, art_u8 g, art_u8 b, int alpha, int n) { int i; int v; @@ -497,7 +497,7 @@ art_ksvg_rgb_mask_run_alpha(art_u8 *buf, art_u8 *mask, art_u8 r, art_u8 g, art_u for(i = 0; i < n; i++) { - am = (alpha * *mask++) + 0x80; + am = (alpha * *tqmask++) + 0x80; am = (am + (am >> 8)) >> 8; v = *buf; @@ -515,7 +515,7 @@ art_ksvg_rgb_mask_run_alpha(art_u8 *buf, art_u8 *mask, art_u8 r, art_u8 g, art_u } static void -art_ksvg_rgb_svp_alpha_mask_callback(void *callback_data, int y, +art_ksvg_rgb_svp_alpha_tqmask_callback(void *callback_data, int y, int start, ArtSVPRenderAAStep *steps, int n_steps) { ArtKSVGRgbaSVPAlphaData *data = (ArtKSVGRgbaSVPAlphaData *)callback_data; @@ -527,7 +527,7 @@ art_ksvg_rgb_svp_alpha_mask_callback(void *callback_data, int y, art_u8 r, g, b; int *alphatab; int alpha; - art_u8 *maskbuf; + art_u8 *tqmaskbuf; linebuf = data->buf; x0 = data->x0; @@ -538,7 +538,7 @@ 
art_ksvg_rgb_svp_alpha_mask_callback(void *callback_data, int y, b = data->b; alphatab = data->alphatab; - maskbuf = data->mask + (y - data->y0) * (data->x1 - data->x0); + tqmaskbuf = data->tqmask + (y - data->y0) * (data->x1 - data->x0); if(n_steps > 0) { @@ -547,7 +547,7 @@ art_ksvg_rgb_svp_alpha_mask_callback(void *callback_data, int y, { alpha = (running_sum >> 16) & 0xff; if(alpha) - art_ksvg_rgb_mask_run_alpha (linebuf, maskbuf, + art_ksvg_rgb_tqmask_run_alpha (linebuf, tqmaskbuf, r, g, b, alphatab[alpha], run_x1 - x0); } @@ -561,7 +561,7 @@ art_ksvg_rgb_svp_alpha_mask_callback(void *callback_data, int y, { alpha = (running_sum >> 16) & 0xff; if(alpha) - art_ksvg_rgb_mask_run_alpha (linebuf + (run_x0 - x0) * 3, maskbuf + (run_x0 - x0), + art_ksvg_rgb_tqmask_run_alpha (linebuf + (run_x0 - x0) * 3, tqmaskbuf + (run_x0 - x0), r, g, b, alphatab[alpha], run_x1 - run_x0); } @@ -571,7 +571,7 @@ art_ksvg_rgb_svp_alpha_mask_callback(void *callback_data, int y, { alpha = (running_sum >> 16) & 0xff; if(alpha) - art_ksvg_rgb_mask_run_alpha (linebuf + (run_x1 - x0) * 3, maskbuf + (run_x1 - x0) , + art_ksvg_rgb_tqmask_run_alpha (linebuf + (run_x1 - x0) * 3, tqmaskbuf + (run_x1 - x0) , r, g, b, alphatab[alpha], x1 - run_x1); } @@ -580,7 +580,7 @@ art_ksvg_rgb_svp_alpha_mask_callback(void *callback_data, int y, { alpha = (running_sum >> 16) & 0xff; if(alpha) - art_ksvg_rgb_mask_run_alpha (linebuf, maskbuf, + art_ksvg_rgb_tqmask_run_alpha (linebuf, tqmaskbuf, r, g, b, alphatab[alpha], x1 - x0); } @@ -600,7 +600,7 @@ art_ksvg_rgb_svp_alpha_mask_callback(void *callback_data, int y, * @rowstride: Rowstride of @buf buffer. * @alphagamma: #ArtAlphaGamma for gamma-correcting the compositing. * - * Renders the shape specified with @svp over the @buf RGB buffer. + * Renders the tqshape specified with @svp over the @buf RGB buffer. * @x1 - @x0 specifies the width, and @y1 - @y0 specifies the height, * of the rectangle rendered. 
The new pixels are stored starting at * the first byte of @buf. Thus, the @x0 and @y0 parameters specify @@ -616,12 +616,12 @@ art_ksvg_rgb_svp_alpha_mask_callback(void *callback_data, int y, * @alphagamma is NULL. **/ void -art_ksvg_rgb_svp_alpha_mask(const ArtSVP *svp, +art_ksvg_rgb_svp_alpha_tqmask(const ArtSVP *svp, int x0, int y0, int x1, int y1, art_u32 rgba, art_u8 *buf, int rowstride, ArtAlphaGamma *alphagamma, - art_u8 *mask) + art_u8 *tqmask) { ArtKSVGRgbaSVPAlphaData data; int r, g, b, alpha; @@ -637,7 +637,7 @@ art_ksvg_rgb_svp_alpha_mask(const ArtSVP *svp, data.g = g; data.b = b; data.alpha = alpha; - data.mask = mask; + data.tqmask = tqmask; a = 0x8000; da = (alpha * 66051 + 0x80) >> 8; /* 66051 equals 2 ^ 32 / (255 * 255) */ @@ -654,6 +654,6 @@ art_ksvg_rgb_svp_alpha_mask(const ArtSVP *svp, data.x1 = x1; data.y0 = y0; - art_svp_render_aa(svp, x0, y0, x1, y1, art_ksvg_rgb_svp_alpha_mask_callback, &data); + art_svp_render_aa(svp, x0, y0, x1, y1, art_ksvg_rgb_svp_alpha_tqmask_callback, &data); } |