annotate src/region-cache.c @ 40303:a725c601ef70

(show_mouse_face): Clean up.  Recognize overwritten cursor differently.

author    Gerd Moellmann <gerd@gnu.org>
date      Thu, 25 Oct 2001 12:58:15 +0000
parents   ee40177f6c68
children  f713f6056d87
/* Caching facts about regions of the buffer, for optimization.
   Copyright (C) 1985, 1986, 1987, 1988, 1989, 1993, 1995
   Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */


#include <config.h>
#include "lisp.h"
#include "buffer.h"
#include "region-cache.h"

#include <stdio.h>


/* Data structures.  */

/* The region cache.

   We want something that maps character positions in a buffer onto
   values.  The representation should deal well with long runs of
   characters with the same value.

   The tricky part: the representation should be very cheap to
   maintain in the presence of many insertions and deletions.  If the
   overhead of maintaining the cache is too high, the speedups it
   offers will be worthless.


   We represent the region cache as a sorted array of struct
   boundary's, each of which contains a buffer position and a value;
   the value applies to all the characters after the buffer position,
   until the position of the next boundary, or the end of the buffer.

   The cache always has a boundary whose position is BUF_BEG, so
   there's always a value associated with every character in the
   buffer.  Since the cache is sorted, this is always the first
   element of the cache.

   To facilitate the insertion and deletion of boundaries in the
   cache, the cache has a gap, just like Emacs's text buffers do.

   To help boundary positions float along with insertions and
   deletions, all boundary positions before the cache gap are stored
   relative to BUF_BEG (buf) (thus they're >= 0), and all boundary
   positions after the gap are stored relative to BUF_Z (buf) (thus
   they're <= 0).  Look at BOUNDARY_POS to see this in action.  See
   revalidate_region_cache to see how this helps.  */

struct boundary {
  int pos;
  int value;
};

struct region_cache {
  /* A sorted array of locations where the known-ness of the buffer
     changes.  */
  struct boundary *boundaries;

  /* boundaries[gap_start ... gap_start + gap_len - 1] is the gap.  */
  int gap_start, gap_len;

  /* The number of elements allocated to boundaries, not including the
     gap.  */
  int cache_len;

  /* The areas that haven't changed since the last time we cleaned out
     invalid entries from the cache.  These overlap when the buffer is
     entirely unchanged.  */
  int beg_unchanged, end_unchanged;

  /* The first and last positions in the buffer.  Because boundaries
     store their positions relative to the start (BEG) and end (Z) of
     the buffer, knowing these positions allows us to accurately
     interpret positions without having to pass the buffer structure
     or its endpoints around all the time.

     Yes, buffer_beg is always 1.  It's there for symmetry with
     buffer_end and the BEG and BUF_BEG macros.  */
  int buffer_beg, buffer_end;
};

/* Return the position of boundary i in cache c.  */
#define BOUNDARY_POS(c, i) \
  ((i) < (c)->gap_start \
   ? (c)->buffer_beg + (c)->boundaries[(i)].pos \
   : (c)->buffer_end + (c)->boundaries[(c)->gap_len + (i)].pos)

/* Return the value for text after boundary i in cache c.  */
#define BOUNDARY_VALUE(c, i) \
  ((i) < (c)->gap_start \
   ? (c)->boundaries[(i)].value \
   : (c)->boundaries[(c)->gap_len + (i)].value)

/* Set the value for text after boundary i in cache c to v.  */
#define SET_BOUNDARY_VALUE(c, i, v) \
  ((i) < (c)->gap_start \
   ? ((c)->boundaries[(i)].value = (v))\
   : ((c)->boundaries[(c)->gap_len + (i)].value = (v)))

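/* A worked example of the representation above (the numbers are
   invented for illustration): consider a cache over a 10-character
   buffer, so buffer_beg = 1 and buffer_end = 11, in which the region
   4..7 carries value 1 and everything else carries value 0.  The
   cache then holds three boundaries, written (position, value):

       index 0: (1, 0)    index 1: (4, 1)    index 2: (7, 0)

   If the gap sits at gap_start = 2 with gap_len = 40, the first two
   boundaries are stored beg-relative, as 0 and 3 (position minus
   buffer_beg), while the last is stored end-relative, as -4 (position
   minus buffer_end), at array slot gap_len + 2.  BOUNDARY_POS (c, 2)
   therefore evaluates to buffer_end + (-4) = 7.  */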

/* How many elements to add to the gap when we resize the buffer.  */
#define NEW_CACHE_GAP (40)

/* See invalidate_region_cache; if an invalidation would throw away
   information about this many characters, call
   revalidate_region_cache before doing the new invalidation, to
   preserve that information, instead of throwing it away.  */
#define PRESERVE_THRESHOLD (500)

static void revalidate_region_cache ();


/* Interface: Allocating, initializing, and disposing of region caches.  */

struct region_cache *
new_region_cache ()
{
  struct region_cache *c
    = (struct region_cache *) xmalloc (sizeof (struct region_cache));

  c->gap_start = 0;
  c->gap_len = NEW_CACHE_GAP;
  c->cache_len = 0;
  c->boundaries =
    (struct boundary *) xmalloc ((c->gap_len + c->cache_len)
                                 * sizeof (*c->boundaries));

  c->beg_unchanged = 0;
  c->end_unchanged = 0;
  c->buffer_beg = 1;
  c->buffer_end = 1;

  /* Insert the boundary for the buffer start.  */
  c->cache_len++;
  c->gap_len--;
  c->gap_start++;
  c->boundaries[0].pos = 0;  /* from buffer_beg */
  c->boundaries[0].value = 0;

  return c;
}

void
free_region_cache (c)
     struct region_cache *c;
{
  xfree (c->boundaries);
  xfree (c);
}

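/* A sketch of the expected lifecycle, from the caller's side (the
   callers shown are hypothetical; in Emacs the caches hang off buffer
   objects, but no particular field name is assumed here):

       struct region_cache *cache = new_region_cache ();
       ...
       know_region_cache (buf, cache, start, end);
       ...
       invalidate_region_cache (buf, cache, head, tail);
       ...
       free_region_cache (cache);

   A fresh cache contains just the dummy boundary at the buffer start,
   with value zero: nothing is known yet.  */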

/* Finding positions in the cache.  */

/* Return the index of the last boundary in cache C at or before POS.
   In other words, return the boundary that specifies the value for
   the region POS..(POS + 1).

   This operation should be logarithmic in the number of cache
   entries.  It would be nice if it took advantage of locality of
   reference, too, by searching entries near the last entry found.  */
static int
find_cache_boundary (c, pos)
     struct region_cache *c;
     int pos;
{
  int low = 0, high = c->cache_len;

  while (low + 1 < high)
    {
      /* mid is always a valid index, because low < high and ">> 1"
         rounds down.  */
      int mid = (low + high) >> 1;
      int boundary = BOUNDARY_POS (c, mid);

      if (pos < boundary)
        high = mid;
      else
        low = mid;
    }

  /* Some testing.  */
  if (BOUNDARY_POS (c, low) > pos
      || (low + 1 < c->cache_len
          && BOUNDARY_POS (c, low + 1) <= pos))
    abort ();

  return low;
}

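/* A worked example of the search above, using the three-boundary
   cache pictured earlier (boundaries at positions 1, 4 and 7):

       find_cache_boundary (c, 1)  returns 0   (governs 1..3)
       find_cache_boundary (c, 5)  returns 1   (governs 4..6)
       find_cache_boundary (c, 7)  returns 2   (governs 7 onward)

   The result is always the index of the last boundary at or before
   POS; it is never past the end of the cache.  */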


/* Moving the cache gap around, inserting, and deleting.  */


/* Move the gap of cache C to index POS, and make sure it has space
   for at least MIN_SIZE boundaries.  */
static void
move_cache_gap (c, pos, min_size)
     struct region_cache *c;
     int pos;
     int min_size;
{
  /* Copy these out of the cache and into registers.  */
  int gap_start = c->gap_start;
  int gap_len = c->gap_len;
  int buffer_beg = c->buffer_beg;
  int buffer_end = c->buffer_end;

  if (pos < 0
      || pos > c->cache_len)
    abort ();

  /* We mustn't ever try to put the gap before the dummy start
     boundary.  That must always be start-relative.  */
  if (pos == 0)
    abort ();

  /* Need we move the gap right?  */
  while (gap_start < pos)
    {
      /* Copy one boundary from after to before the gap, and
         convert its position to start-relative.  */
      c->boundaries[gap_start].pos
        = (buffer_end
           + c->boundaries[gap_start + gap_len].pos
           - buffer_beg);
      c->boundaries[gap_start].value
        = c->boundaries[gap_start + gap_len].value;
      gap_start++;
    }

  /* To enlarge the gap, we need to re-allocate the boundary array, and
     then shift the area after the gap to the new end.  Since the cost
     is proportional to the amount of stuff after the gap, we do the
     enlargement here, after a right shift but before a left shift,
     when the portion after the gap is smallest.  */
  if (gap_len < min_size)
    {
      int i;

      /* Always make at least NEW_CACHE_GAP elements, as long as we're
         expanding anyway.  */
      if (min_size < NEW_CACHE_GAP)
        min_size = NEW_CACHE_GAP;

      c->boundaries =
        (struct boundary *) xrealloc (c->boundaries,
                                      ((min_size + c->cache_len)
                                       * sizeof (*c->boundaries)));

      /* Some systems don't provide a version of the copy routine that
         can be trusted to shift memory upward into an overlapping
         region.  memmove isn't widely available.  */
      min_size -= gap_len;
      for (i = c->cache_len - 1; i >= gap_start; i--)
        {
          c->boundaries[i + min_size].pos = c->boundaries[i + gap_len].pos;
          c->boundaries[i + min_size].value = c->boundaries[i + gap_len].value;
        }

      gap_len = min_size;
    }

  /* Need we move the gap left?  */
  while (pos < gap_start)
    {
      gap_start--;

      /* Copy one region from before to after the gap, and
         convert its position to end-relative.  */
      c->boundaries[gap_start + gap_len].pos
        = c->boundaries[gap_start].pos + buffer_beg - buffer_end;
      c->boundaries[gap_start + gap_len].value
        = c->boundaries[gap_start].value;
    }

  /* Assign these back into the cache.  */
  c->gap_start = gap_start;
  c->gap_len = gap_len;
}

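/* A worked example of the relative-position arithmetic above, again
   with buffer_beg = 1 and buffer_end = 11: a boundary at buffer
   position 7 is stored as 7 - buffer_beg = 6 while it lies before the
   gap, and as 7 - buffer_end = -4 once it lies after the gap.  Moving
   the gap rightward past it rewrites -4 as buffer_end + (-4) -
   buffer_beg = 6; moving the gap back leftward rewrites 6 as
   6 + buffer_beg - buffer_end = -4.  The buffer position reported by
   BOUNDARY_POS never changes.  */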

/* Insert a new boundary in cache C; it will have cache index INDEX,
   and have the specified POS and VALUE.  */
static void
insert_cache_boundary (c, index, pos, value)
     struct region_cache *c;
     int index;
     int pos, value;
{
  /* index must be a valid cache index.  */
  if (index < 0 || index > c->cache_len)
    abort ();

  /* We must never want to insert something before the dummy first
     boundary.  */
  if (index == 0)
    abort ();

  /* We must only be inserting things in order.  */
  if (! (BOUNDARY_POS (c, index-1) < pos
         && (index == c->cache_len
             || pos < BOUNDARY_POS (c, index))))
    abort ();

  /* The value must be different from the ones around it.  However, we
     temporarily create boundaries that establish the same value as
     the subsequent boundary, so we're not going to flag that case.  */
  if (BOUNDARY_VALUE (c, index-1) == value)
    abort ();

  move_cache_gap (c, index, 1);

  c->boundaries[index].pos = pos - c->buffer_beg;
  c->boundaries[index].value = value;
  c->gap_start++;
  c->gap_len--;
  c->cache_len++;
}


/* Delete the i'th entry from cache C if START <= i < END.  */

static void
delete_cache_boundaries (c, start, end)
     struct region_cache *c;
     int start, end;
{
  int len = end - start;

  /* Gotta be in range.  */
  if (start < 0
      || end > c->cache_len)
    abort ();

  /* Gotta be in order.  */
  if (start > end)
    abort ();

  /* Can't delete the dummy entry.  */
  if (start == 0
      && end >= 1)
    abort ();

  /* Minimize gap motion.  If we're deleting nothing, do nothing.  */
  if (len == 0)
    ;
  /* If the gap is before the region to delete, delete from the start
     forward.  */
  else if (c->gap_start <= start)
    {
      move_cache_gap (c, start, 0);
      c->gap_len += len;
    }
  /* If the gap is after the region to delete, delete from the end
     backward.  */
  else if (end <= c->gap_start)
    {
      move_cache_gap (c, end, 0);
      c->gap_start -= len;
      c->gap_len += len;
    }
  /* If the gap is in the region to delete, just expand it.  */
  else
    {
      c->gap_start = start;
      c->gap_len += len;
    }

  c->cache_len -= len;
}



/* Set the value for a region.  */

/* Set the value in cache C for the region START..END to VALUE.  */
static void
set_cache_region (c, start, end, value)
     struct region_cache *c;
     int start, end;
     int value;
{
  if (start > end)
    abort ();
  if (start < c->buffer_beg
      || end > c->buffer_end)
    abort ();

  /* Eliminate this case; then we can assume that start and end-1 are
     both the locations of real characters in the buffer.  */
  if (start == end)
    return;

  {
    /* We need to make sure that there are no boundaries in the area
       between start and end; the whole area will have the same value,
       so those boundaries will not be necessary.

       Let start_ix be the cache index of the boundary governing the
       first character of start..end, and let end_ix be the cache
       index of the earliest boundary after the last character in
       start..end.  (This tortured terminology is intended to answer
       all the "< or <=?" sort of questions.)  */
    int start_ix = find_cache_boundary (c, start);
    int end_ix = find_cache_boundary (c, end - 1) + 1;

    /* We must remember the value established by the last boundary
       before end; if that boundary's domain stretches beyond end,
       we'll need to create a new boundary at end, and that boundary
       must have that remembered value.  */
    int value_at_end = BOUNDARY_VALUE (c, end_ix - 1);

    /* Delete all boundaries strictly within start..end; this means
       those whose indices are between start_ix (exclusive) and end_ix
       (exclusive).  */
    delete_cache_boundaries (c, start_ix + 1, end_ix);

    /* Make sure we have the right value established going in to
       start..end from the left, and no unnecessary boundaries.  */
    if (BOUNDARY_POS (c, start_ix) == start)
      {
        /* Is this boundary necessary?  If no, remove it; if yes, set
           its value.  */
        if (start_ix > 0
            && BOUNDARY_VALUE (c, start_ix - 1) == value)
          {
            delete_cache_boundaries (c, start_ix, start_ix + 1);
            start_ix--;
          }
        else
          SET_BOUNDARY_VALUE (c, start_ix, value);
      }
    else
      {
        /* Do we need to add a new boundary here?  */
        if (BOUNDARY_VALUE (c, start_ix) != value)
          {
            insert_cache_boundary (c, start_ix + 1, start, value);
            start_ix++;
          }
      }

    /* This is equivalent to letting end_ix float (like a buffer
       marker does) with the insertions and deletions we may have
       done.  */
    end_ix = start_ix + 1;

    /* Make sure we have the correct value established as we leave
       start..end to the right.  */
    if (end == c->buffer_end)
      /* There is no text after start..end; nothing to do.  */
      ;
    else if (end_ix >= c->cache_len
             || end < BOUNDARY_POS (c, end_ix))
      {
        /* There is no boundary at end, but we may need one.  */
        if (value_at_end != value)
          insert_cache_boundary (c, end_ix, end, value_at_end);
      }
    else
      {
        /* There is a boundary at end; should it be there?  */
        if (value == BOUNDARY_VALUE (c, end_ix))
          delete_cache_boundaries (c, end_ix, end_ix + 1);
      }
  }
}

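/* A worked example of how the boundary list evolves (positions and
   values invented): start from the 10-character cache used earlier,
   with a single boundary (1, 0), written (position, value):

       set_cache_region (c, 4, 7, 1);     boundaries: (1,0) (4,1) (7,0)
       set_cache_region (c, 7, 9, 1);     boundaries: (1,0) (4,1) (9,0)
       set_cache_region (c, 1, 11, 0);    boundaries: (1,0)

   Runs with equal values are merged and redundant boundaries deleted,
   so the cache's size tracks the number of value changes, not the
   size of the buffer.  */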


/* Interface: Invalidating the cache.  Private: Re-validating the cache.  */

/* Indicate that a section of BUF has changed, to invalidate CACHE.
   HEAD is the number of chars unchanged at the beginning of the buffer.
   TAIL is the number of chars unchanged at the end of the buffer.
   NOTE: this is *not* the same as the ending position of the modified
   region.
   (This way of specifying regions makes more sense than absolute
   buffer positions in the presence of insertions and deletions; the
   args to pass are the same before and after such an operation.)  */
void
invalidate_region_cache (buf, c, head, tail)
     struct buffer *buf;
     struct region_cache *c;
     int head, tail;
{
  /* Let chead = c->beg_unchanged, and
         ctail = c->end_unchanged.
     If z-tail < beg+chead by a large amount, or
        z-ctail < beg+head by a large amount,

     then cutting back chead and ctail to head and tail would lose a
     lot of information that we could preserve by revalidating the
     cache before processing this invalidation.  Losing that
     information may be more costly than revalidating the cache now.
     So go ahead and call revalidate_region_cache if it seems that it
     might be worthwhile.  */
  if (((BUF_BEG (buf) + c->beg_unchanged) - (BUF_Z (buf) - tail)
       > PRESERVE_THRESHOLD)
      || ((BUF_BEG (buf) + head) - (BUF_Z (buf) - c->end_unchanged)
          > PRESERVE_THRESHOLD))
    revalidate_region_cache (buf, c);


  if (head < c->beg_unchanged)
    c->beg_unchanged = head;
  if (tail < c->end_unchanged)
    c->end_unchanged = tail;

  /* We now know nothing about the region between the unchanged head
     and the unchanged tail (call it the "modified region"), not even
     its length.

     If the modified region has shrunk in size (deletions do this),
     then the cache may now contain boundaries originally located in
     text that doesn't exist any more.

     If the modified region has increased in size (insertions do
     this), then there may now be boundaries in the modified region
     whose positions are wrong.

     Even calling BOUNDARY_POS on boundaries still in the unchanged
     head or tail may well give incorrect answers now, since
     c->buffer_beg and c->buffer_end may well be wrong now.  (Well,
     okay, c->buffer_beg never changes, so boundaries in the unchanged
     head will still be okay.  But it's the principle of the thing.)

     So things are generally a mess.

     But we don't clean up this mess here; that would be expensive,
     and this function gets called every time any buffer modification
     occurs.  Rather, we can clean up everything in one swell foop,
     accounting for all the modifications at once, by calling
     revalidate_region_cache before we try to consult the cache the
     next time.  */
}

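/* A worked example of the HEAD/TAIL convention (the numbers are
   invented): suppose BUF runs from BEG = 1 to Z = 101, i.e. 100
   characters, and one character is inserted at position 41.  Then 40
   characters are unchanged at the beginning and 60 at the end, so the
   caller reports

       invalidate_region_cache (buf, c, 40, 60);

   and exactly the same call describes the inverse deletion, which is
   why counts from the two ends are used instead of absolute
   positions.  */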

/* Clean out any cache entries applying to the modified region, and
   make the positions of the remaining entries accurate again.

   After calling this function, the mess described in the comment in
   invalidate_region_cache is cleaned up.

   This function operates by simply throwing away everything it knows
   about the modified region.  It doesn't care exactly which
   insertions and deletions took place; it just tosses it all.

   For example, if you insert a single character at the beginning of
   the buffer, and a single character at the end of the buffer,
   without calling this function in between the two insertions, then
   the entire cache will be freed of useful information.  On the other
   hand, if you do manage to call this function in between the two
   insertions, then the modified regions will be small in both cases,
   no information will be tossed, and the cache will know that it
   doesn't have knowledge of the first and last characters any more.

   Calling this function may be expensive; it does binary searches in
   the cache, and causes cache gap motion.  */

static void
revalidate_region_cache (buf, c)
     struct buffer *buf;
     struct region_cache *c;
{
  /* The boundaries now in the cache are expressed relative to the
     buffer_beg and buffer_end values stored in the cache.  Now,
     buffer_beg and buffer_end may not be the same as BUF_BEG (buf)
     and BUF_Z (buf), so we have two different "bases" to deal with
     --- the cache's, and the buffer's.  */

  /* If the entire buffer is still valid, don't waste time.  Yes, this
     should be a >, not a >=; think about what beg_unchanged and
     end_unchanged get set to when the only change has been an
     insertion.  */
  if (c->buffer_beg + c->beg_unchanged
      > c->buffer_end - c->end_unchanged)
    return;

  /* If all the text we knew about as of the last cache revalidation
     is still there, then all of the information in the cache is still
     valid.  Because c->buffer_beg and c->buffer_end are out-of-date,
     the modified region appears from the cache's point of view to be
     a null region located someplace in the buffer.

     Now, invalidating that empty string will have no actual effect on
     the cache; instead, we need to update the cache's basis first
     (which will give the modified region the same size in the cache
     as it has in the buffer), and then invalidate the modified
     region.  */
  if (c->buffer_beg + c->beg_unchanged
      == c->buffer_end - c->end_unchanged)
    {
      /* Move the gap so that all the boundaries in the unchanged head
         are expressed beg-relative, and all the boundaries in the
         unchanged tail are expressed end-relative.  That done, we can
         plug in the new buffer beg and end, and all the positions
         will be accurate.

         The boundary which has jurisdiction over the modified region
         should be left before the gap.  */
      move_cache_gap (c,
                      (find_cache_boundary (c, (c->buffer_beg
                                                + c->beg_unchanged))
                       + 1),
                      0);

      c->buffer_beg = BUF_BEG (buf);
      c->buffer_end = BUF_Z (buf);

      /* Now that the cache's basis has been changed, the modified
         region actually takes up some space in the cache, so we can
         invalidate it.  */
      set_cache_region (c,
                        c->buffer_beg + c->beg_unchanged,
                        c->buffer_end - c->end_unchanged,
                        0);
    }

  /* Otherwise, there is a non-empty region in the cache which
     corresponds to the modified region of the buffer.  */
  else
    {
      int modified_ix;

      /* These positions are correct, relative to both the cache basis
         and the buffer basis.  */
      set_cache_region (c,
                        c->buffer_beg + c->beg_unchanged,
                        c->buffer_end - c->end_unchanged,
                        0);

      /* Now the cache contains only boundaries that are in the
         unchanged head and tail; we've disposed of any boundaries
         whose positions we can't be sure of given the information
         we've saved.

         If we put the cache gap between the unchanged head and the
         unchanged tail, we can adjust all the boundary positions at
         once, simply by setting buffer_beg and buffer_end.

         The boundary which has jurisdiction over the modified region
         should be left before the gap.  */
      modified_ix =
        find_cache_boundary (c, (c->buffer_beg + c->beg_unchanged)) + 1;
      move_cache_gap (c, modified_ix, 0);

      c->buffer_beg = BUF_BEG (buf);
      c->buffer_end = BUF_Z (buf);

      /* Now, we may have shrunk the buffer when we changed the basis,
         and brought the boundaries we created for the start and end
         of the modified region together, giving them the same
         position.  If that's the case, we should collapse them into
         one boundary.  Or we may even delete them both, if the values
         before and after them are the same.  */
      if (modified_ix < c->cache_len
          && (BOUNDARY_POS (c, modified_ix - 1)
              == BOUNDARY_POS (c, modified_ix)))
        {
          int value_after = BOUNDARY_VALUE (c, modified_ix);

          /* Should we remove both of the boundaries?  Yes, if the
             latter boundary is now establishing the same value that
             the former boundary's predecessor does.  */
          if (modified_ix - 1 > 0
              && value_after == BOUNDARY_VALUE (c, modified_ix - 2))
            delete_cache_boundaries (c, modified_ix - 1, modified_ix + 1);
          else
            {
              /* We do need a boundary here; collapse the two
                 boundaries into one.  */
              SET_BOUNDARY_VALUE (c, modified_ix - 1, value_after);
              delete_cache_boundaries (c, modified_ix, modified_ix + 1);
            }
        }
    }

  /* Now the entire cache is valid.  */
  c->beg_unchanged
    = c->end_unchanged
    = c->buffer_end - c->buffer_beg;
}

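/* A sketch of why batching matters (the positions are invented):
   take a 100-character buffer whose whole body has been marked known,
   and two one-character edits, one near each end:

       invalidate_region_cache (buf, c, 1, 98);
       invalidate_region_cache (buf, c, 98, 1);

   If nothing consults the cache in between, beg_unchanged and
   end_unchanged both drop to 1, and the next revalidation discards
   knowledge of 98 of the 100 characters.  If a lookup (and hence a
   revalidation) happens between the two calls, each revalidation
   trims only the character actually touched.  The PRESERVE_THRESHOLD
   test in invalidate_region_cache exists to catch the worst such
   cases automatically.  */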

/* Interface: Adding information to the cache.  */

/* Assert that the region of BUF between START and END (absolute
   buffer positions) is "known," for the purposes of CACHE (e.g. "has
   no newlines", in the case of the line cache).  */
void
know_region_cache (buf, c, start, end)
     struct buffer *buf;
     struct region_cache *c;
     int start, end;
{
  revalidate_region_cache (buf, c);

  set_cache_region (c, start, end, 1);
}


/* Interface: using the cache.  */

/* Return true if the text immediately after POS in BUF is known, for
   the purposes of CACHE.  If NEXT is non-zero, set *NEXT to the nearest
   position after POS where the knownness changes.  */
int
region_cache_forward (buf, c, pos, next)
     struct buffer *buf;
     struct region_cache *c;
     int pos;
     int *next;
{
  revalidate_region_cache (buf, c);

  {
    int i = find_cache_boundary (c, pos);
    int i_value = BOUNDARY_VALUE (c, i);
    int j;

    /* Beyond the end of the buffer is unknown, by definition.  */
    if (pos >= BUF_Z (buf))
      {
        if (next) *next = BUF_Z (buf);
        i_value = 0;
      }
    else if (next)
      {
        /* Scan forward from i to find the next differing position.  */
        for (j = i + 1; j < c->cache_len; j++)
          if (BOUNDARY_VALUE (c, j) != i_value)
            break;

        if (j < c->cache_len)
          *next = BOUNDARY_POS (c, j);
        else
          *next = BUF_Z (buf);
      }

    return i_value;
  }
}

/* Return true if the text immediately before POS in BUF is known, for
   the purposes of CACHE.  If NEXT is non-zero, set *NEXT to the nearest
   position before POS where the knownness changes.  */
int
region_cache_backward (buf, c, pos, next)
     struct buffer *buf;
     struct region_cache *c;
     int pos;
     int *next;
{
  revalidate_region_cache (buf, c);

  /* Before the beginning of the buffer is unknown, by
     definition.  */
  if (pos <= BUF_BEG (buf))
    {
      if (next) *next = BUF_BEG (buf);
      return 0;
    }

  {
    int i = find_cache_boundary (c, pos - 1);
    int i_value = BOUNDARY_VALUE (c, i);
    int j;

    if (next)
      {
        /* Scan backward from i to find the next differing position.  */
        for (j = i - 1; j >= 0; j--)
          if (BOUNDARY_VALUE (c, j) != i_value)
            break;

        if (j >= 0)
          *next = BOUNDARY_POS (c, j + 1);
        else
          *next = BUF_BEG (buf);
      }

    return i_value;
  }
}

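/* A sketch of how a scanning loop might consult this interface; the
   "no newlines" property is the example given for know_region_cache
   above, and `cache', `limit' and scan_and_record are hypothetical
   names, not definitions from this file:

       int next;
       while (pos < limit)
         {
           if (region_cache_forward (buf, cache, pos, &next))
             pos = next < limit ? next : limit;
           else
             pos = scan_and_record (buf, cache, pos,
                                    next < limit ? next : limit);
         }

   The first branch skips a run the cache already vouches for; the
   second examines the text directly and is expected to call
   know_region_cache on whatever stretch it verifies.  */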

/* Debugging: pretty-print a cache to the standard error output.  */

void
pp_cache (c)
     struct region_cache *c;
{
  int i;
  int beg_u = c->buffer_beg + c->beg_unchanged;
  int end_u = c->buffer_end - c->end_unchanged;

  fprintf (stderr,
           "basis: %d..%d modified: %d..%d\n",
           c->buffer_beg, c->buffer_end,
           beg_u, end_u);

  for (i = 0; i < c->cache_len; i++)
    {
      int pos = BOUNDARY_POS (c, i);

      putc (((pos < beg_u) ? 'v'
             : (pos == beg_u) ? '-'
             : ' '),
            stderr);
      putc (((pos > end_u) ? '^'
             : (pos == end_u) ? '-'
             : ' '),
            stderr);
      fprintf (stderr, "%d : %d\n", pos, BOUNDARY_VALUE (c, i));
    }
}