linux/lrmi.c @ 2212:f0f681ef92ff

LRMI import
author nick
date Mon, 15 Oct 2001 16:59:35 +0000
1 /*
2 Linux Real Mode Interface - A library of DPMI-like functions for Linux.
3
4 Copyright (C) 1998 by Josh Vanderhoof
5
6 You are free to distribute and modify this file, as long as you
7 do not remove this copyright notice and clearly label modified
8 versions as being modified.
9
10 This software has NO WARRANTY. Use it at your own risk.
11 Original location: http://cvs.debian.org/lrmi/
12 */
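/*
Typical usage (a sketch, not part of the original library): the VBE
"return controller information" call below only illustrates how the
LRMI_* entry points fit together; the 0x4f00 function number, the
512-byte buffer and the ioperm()/iopl() calls are assumptions about a
typical caller, not something this file requires by itself.

	struct LRMI_regs r;
	void *buf;

	if (!LRMI_init())
		return 1;

	ioperm(0, 1024, 1);		// port access for the I/O emulation
	iopl(3);

	buf = LRMI_alloc_real(512);	// buffer inside the 16-bit arena
	if (buf == NULL)
		return 1;

	memset(&r, 0, sizeof(r));
	r.eax = 0x4f00;			// VBE: return controller information
	r.es  = (unsigned int)buf >> 4;
	r.edi = (unsigned int)buf & 0xf;

	if (!LRMI_int(0x10, &r))	// run the video BIOS in vm86 mode
		return 1;
	// VBE reports success as 0x004f in ax

	LRMI_free_real(buf);
*/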
13
14 #include <stdio.h>
15 #include <string.h>
16 #include <sys/io.h>
17 #include <asm/vm86.h>
18
19 #ifdef USE_LIBC_VM86
20 #include <sys/vm86.h>
21 #endif
22
23 #include <sys/types.h>
24 #include <sys/stat.h>
25 #include <sys/mman.h>
26 #include <unistd.h>
27 #include <fcntl.h>
28
29 #include "lrmi.h"
30
31 #define REAL_MEM_BASE ((void *)0x10000)
32 #define REAL_MEM_SIZE 0x10000
33 #define REAL_MEM_BLOCKS 0x100
34
35 struct mem_block
36 {
37 unsigned int size : 20;
38 unsigned int free : 1;
39 };
40
41 static struct
42 {
43 int ready;
44 int count;
45 struct mem_block blocks[REAL_MEM_BLOCKS];
46 } mem_info = { 0 };
47
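/*
Map a 64k region at REAL_MEM_BASE (0x10000) with a private /dev/zero
mapping and initialize the block list with one large free block.
This is the arena managed by LRMI_alloc_real() and LRMI_free_real().
*/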
48 static int
49 real_mem_init(void)
50 {
51 void *m;
52 int fd_zero;
53
54 if (mem_info.ready)
55 return 1;
56
57 fd_zero = open("/dev/zero", O_RDONLY);
58 if (fd_zero == -1)
59 {
60 perror("open /dev/zero");
61 return 0;
62 }
63
64 m = mmap((void *)REAL_MEM_BASE, REAL_MEM_SIZE,
65 PROT_READ | PROT_WRITE | PROT_EXEC,
66 MAP_FIXED | MAP_PRIVATE, fd_zero, 0);
67
68 if (m == (void *)-1)
69 {
70 perror("mmap /dev/zero");
71 close(fd_zero);
72 return 0;
73 }
74
close(fd_zero); /* the private mapping remains valid after the descriptor is closed */

75 mem_info.ready = 1;
76 mem_info.count = 1;
77 mem_info.blocks[0].size = REAL_MEM_SIZE;
78 mem_info.blocks[0].free = 1;
79
80 return 1;
81 }
82
83
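/*
Open up (insert_block) or close (delete_block) a slot at index i in
the block array by shifting the tail of the array.
*/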
84 static void
85 insert_block(int i)
86 {
87 memmove(
88 mem_info.blocks + i + 1,
89 mem_info.blocks + i,
90 (mem_info.count - i) * sizeof(struct mem_block));
91
92 mem_info.count++;
93 }
94
95 static void
96 delete_block(int i)
97 {
98 mem_info.count--;
99
100 memmove(
101 mem_info.blocks + i,
102 mem_info.blocks + i + 1,
103 (mem_info.count - i) * sizeof(struct mem_block));
104 }
105
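/*
First-fit allocator over the block list.  Requests are rounded up to
a 16-byte paragraph; a free block is used only if it is strictly
larger than the request, so the split below always leaves a non-empty
free remainder.
*/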
106 void *
107 LRMI_alloc_real(int size)
108 {
109 int i;
110 char *r = (char *)REAL_MEM_BASE;
111
112 if (!mem_info.ready)
113 return NULL;
114
115 if (mem_info.count == REAL_MEM_BLOCKS)
116 return NULL;
117
118 size = (size + 15) & ~15;
119
120 for (i = 0; i < mem_info.count; i++)
121 {
122 if (mem_info.blocks[i].free && size < mem_info.blocks[i].size)
123 {
124 insert_block(i);
125
126 mem_info.blocks[i].size = size;
127 mem_info.blocks[i].free = 0;
128 mem_info.blocks[i + 1].size -= size;
129
130 return (void *)r;
131 }
132
133 r += mem_info.blocks[i].size;
134 }
135
136 return NULL;
137 }
138
139
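/*
Walk the block list by address to find the allocation, mark it free,
and coalesce it with any free neighbours.
*/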
140 void
141 LRMI_free_real(void *m)
142 {
143 int i;
144 char *r = (char *)REAL_MEM_BASE;
145
146 if (!mem_info.ready)
147 return;
148
149 i = 0;
150 while (m != (void *)r)
151 {
152 r += mem_info.blocks[i].size;
153 i++;
154 if (i == mem_info.count)
155 return;
156 }
157
158 mem_info.blocks[i].free = 1;
159
160 if (i + 1 < mem_info.count && mem_info.blocks[i + 1].free)
161 {
162 mem_info.blocks[i].size += mem_info.blocks[i + 1].size;
163 delete_block(i + 1);
164 }
165
166 if (i - 1 >= 0 && mem_info.blocks[i - 1].free)
167 {
168 mem_info.blocks[i - 1].size += mem_info.blocks[i].size;
169 delete_block(i);
170 }
171 }
172
173
174 #define DEFAULT_VM86_FLAGS (IF_MASK | IOPL_MASK)
175 #define DEFAULT_STACK_SIZE 0x1000
176 #define RETURN_TO_32_INT 255
177
178 static struct
179 {
180 int ready;
181 unsigned short ret_seg, ret_off;
182 unsigned short stack_seg, stack_off;
183 struct vm86_struct vm;
184 } context = { 0 };
185
186
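/*
Small helpers: set_bit() marks an entry in the vm86 int_revectored
bitmap, get_int_seg()/get_int_off() read a vector from the real-mode
interrupt table at linear address 0, and pushw() pushes a 16-bit
value onto the vm86 stack at ss:sp.
*/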
187 static inline void
188 set_bit(unsigned int bit, void *array)
189 {
190 unsigned char *a = array;
191
192 a[bit / 8] |= (1 << (bit % 8));
193 }
194
195
196 static inline unsigned int
197 get_int_seg(int i)
198 {
199 return *(unsigned short *)(i * 4 + 2);
200 }
201
202
203 static inline unsigned int
204 get_int_off(int i)
205 {
206 return *(unsigned short *)(i * 4);
207 }
208
209
210 static inline void
211 pushw(unsigned short i)
212 {
213 struct vm86_regs *r = &context.vm.regs;
214 r->esp -= 2;
215 *(unsigned short *)(((unsigned int)r->ss << 4) + r->esp) = i;
216 }
217
218
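/*
One-time setup: map the real-mode arena, the interrupt vector table,
the BIOS data area and the ROM region, allocate a vm86 stack and the
two-byte "int RETURN_TO_32_INT" stub that real-mode code returns
through, and prepare the vm86 context.
*/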
219 int
220 LRMI_init(void)
221 {
222 void *m;
223 int fd_mem;
224
225 if (context.ready)
226 return 1;
227
228 if (!real_mem_init())
229 return 0;
230
231 /*
232 Map the Interrupt Vectors (0x0 - 0x400) + BIOS data (0x400 - 0x502)
233 and the ROM (0xa0000 - 0x100000)
234 */
235 fd_mem = open("/dev/mem", O_RDWR);
236
237 if (fd_mem == -1)
238 {
239 perror("open /dev/mem");
240 return 0;
241 }
242
243 m = mmap((void *)0, 0x502,
244 PROT_READ | PROT_WRITE | PROT_EXEC,
245 MAP_FIXED | MAP_PRIVATE, fd_mem, 0);
246
247 if (m == (void *)-1)
248 {
249 perror("mmap /dev/mem");
250 return 0;
251 }
252
253 m = mmap((void *)0xa0000, 0x100000 - 0xa0000,
254 PROT_READ | PROT_WRITE,
255 MAP_FIXED | MAP_SHARED, fd_mem, 0xa0000);
256
257 if (m == (void *)-1)
258 {
259 perror("mmap /dev/mem");
260 return 0;
261 }
262
close(fd_mem); /* the /dev/mem mappings remain valid after the descriptor is closed */
263
264 /*
265 Allocate a stack
266 */
267 m = LRMI_alloc_real(DEFAULT_STACK_SIZE);
if (m == NULL)
return 0;
268
269 context.stack_seg = (unsigned int)m >> 4;
270 context.stack_off = DEFAULT_STACK_SIZE;
271
272 /*
273 Allocate the return to 32 bit routine
274 */
275 m = LRMI_alloc_real(2);
if (m == NULL)
return 0;
276
277 context.ret_seg = (unsigned int)m >> 4;
278 context.ret_off = (unsigned int)m & 0xf;
279
280 ((unsigned char *)m)[0] = 0xcd; /* int opcode */
281 ((unsigned char *)m)[1] = RETURN_TO_32_INT;
282
283 memset(&context.vm, 0, sizeof(context.vm));
284
285 /*
286 Enable kernel emulation of all ints except RETURN_TO_32_INT
287 */
288 memset(&context.vm.int_revectored, 0, sizeof(context.vm.int_revectored));
289 set_bit(RETURN_TO_32_INT, &context.vm.int_revectored);
290
291 context.ready = 1;
292
293 return 1;
294 }
295
296
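/*
Copy caller-visible registers between a struct LRMI_regs and the vm86
register block.  set_regs() also forces the default vm86 flags.
*/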
297 static void
298 set_regs(struct LRMI_regs *r)
299 {
300 context.vm.regs.edi = r->edi;
301 context.vm.regs.esi = r->esi;
302 context.vm.regs.ebp = r->ebp;
303 context.vm.regs.ebx = r->ebx;
304 context.vm.regs.edx = r->edx;
305 context.vm.regs.ecx = r->ecx;
306 context.vm.regs.eax = r->eax;
307 context.vm.regs.eflags = DEFAULT_VM86_FLAGS;
308 context.vm.regs.es = r->es;
309 context.vm.regs.ds = r->ds;
310 context.vm.regs.fs = r->fs;
311 context.vm.regs.gs = r->gs;
312 }
313
314
315 static void
316 get_regs(struct LRMI_regs *r)
317 {
318 r->edi = context.vm.regs.edi;
319 r->esi = context.vm.regs.esi;
320 r->ebp = context.vm.regs.ebp;
321 r->ebx = context.vm.regs.ebx;
322 r->edx = context.vm.regs.edx;
323 r->ecx = context.vm.regs.ecx;
324 r->eax = context.vm.regs.eax;
325 r->flags = context.vm.regs.eflags;
326 r->es = context.vm.regs.es;
327 r->ds = context.vm.regs.ds;
328 r->fs = context.vm.regs.fs;
329 r->gs = context.vm.regs.gs;
330 }
331
332 #define DIRECTION_FLAG (1 << 10)
333
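/*
Emulate the INS/OUTS string port I/O instructions (with and without a
REP prefix) that make vm86() return VM86_UNKNOWN.  The access is done
directly with inline asm, honouring the direction flag, and the
updated 16-bit DI/SI (and CX for the REP forms) are written back into
the vm86 register block.
*/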
334 static void
335 em_ins(int size)
336 {
337 unsigned int edx, edi;
338
339 edx = context.vm.regs.edx & 0xffff;
340 edi = context.vm.regs.edi & 0xffff;
341 edi += (unsigned int)context.vm.regs.ds << 4;
342
343 if (context.vm.regs.eflags & DIRECTION_FLAG)
344 {
345 if (size == 4)
346 asm volatile ("std; insl; cld"
347 : "=D" (edi) : "d" (edx), "0" (edi));
348 else if (size == 2)
349 asm volatile ("std; insw; cld"
350 : "=D" (edi) : "d" (edx), "0" (edi));
351 else
352 asm volatile ("std; insb; cld"
353 : "=D" (edi) : "d" (edx), "0" (edi));
354 }
355 else
356 {
357 if (size == 4)
358 asm volatile ("cld; insl"
359 : "=D" (edi) : "d" (edx), "0" (edi));
360 else if (size == 2)
361 asm volatile ("cld; insw"
362 : "=D" (edi) : "d" (edx), "0" (edi));
363 else
364 asm volatile ("cld; insb"
365 : "=D" (edi) : "d" (edx), "0" (edi));
366 }
367
368 edi -= (unsigned int)context.vm.regs.ds << 4;
369
370 context.vm.regs.edi &= 0xffff0000;
371 context.vm.regs.edi |= edi & 0xffff;
372 }
373
374 static void
375 em_rep_ins(int size)
376 {
377 unsigned int ecx, edx, edi;
378
379 ecx = context.vm.regs.ecx & 0xffff;
380 edx = context.vm.regs.edx & 0xffff;
381 edi = context.vm.regs.edi & 0xffff;
382 edi += (unsigned int)context.vm.regs.ds << 4;
383
384 if (context.vm.regs.eflags & DIRECTION_FLAG)
385 {
386 if (size == 4)
387 asm volatile ("std; rep; insl; cld"
388 : "=D" (edi), "=c" (ecx)
389 : "d" (edx), "0" (edi), "1" (ecx));
390 else if (size == 2)
391 asm volatile ("std; rep; insw; cld"
392 : "=D" (edi), "=c" (ecx)
393 : "d" (edx), "0" (edi), "1" (ecx));
394 else
395 asm volatile ("std; rep; insb; cld"
396 : "=D" (edi), "=c" (ecx)
397 : "d" (edx), "0" (edi), "1" (ecx));
398 }
399 else
400 {
401 if (size == 4)
402 asm volatile ("cld; rep; insl"
403 : "=D" (edi), "=c" (ecx)
404 : "d" (edx), "0" (edi), "1" (ecx));
405 else if (size == 2)
406 asm volatile ("cld; rep; insw"
407 : "=D" (edi), "=c" (ecx)
408 : "d" (edx), "0" (edi), "1" (ecx));
409 else
410 asm volatile ("cld; rep; insb"
411 : "=D" (edi), "=c" (ecx)
412 : "d" (edx), "0" (edi), "1" (ecx));
413 }
414
415 edi -= (unsigned int)context.vm.regs.ds << 4;
416
417 context.vm.regs.edi &= 0xffff0000;
418 context.vm.regs.edi |= edi & 0xffff;
419
420 context.vm.regs.ecx &= 0xffff0000;
421 context.vm.regs.ecx |= ecx & 0xffff;
422 }
423
424 static void
425 em_outs(int size)
426 {
427 unsigned int edx, esi;
428
429 edx = context.vm.regs.edx & 0xffff;
430 esi = context.vm.regs.esi & 0xffff;
431 esi += (unsigned int)context.vm.regs.ds << 4;
432
433 if (context.vm.regs.eflags & DIRECTION_FLAG)
434 {
435 if (size == 4)
436 asm volatile ("std; outsl; cld"
437 : "=S" (esi) : "d" (edx), "0" (esi));
438 else if (size == 2)
439 asm volatile ("std; outsw; cld"
440 : "=S" (esi) : "d" (edx), "0" (esi));
441 else
442 asm volatile ("std; outsb; cld"
443 : "=S" (esi) : "d" (edx), "0" (esi));
444 }
445 else
446 {
447 if (size == 4)
448 asm volatile ("cld; outsl"
449 : "=S" (esi) : "d" (edx), "0" (esi));
450 else if (size == 2)
451 asm volatile ("cld; outsw"
452 : "=S" (esi) : "d" (edx), "0" (esi));
453 else
454 asm volatile ("cld; outsb"
455 : "=S" (esi) : "d" (edx), "0" (esi));
456 }
457
458 esi -= (unsigned int)context.vm.regs.ds << 4;
459
460 context.vm.regs.esi &= 0xffff0000;
461 context.vm.regs.esi |= esi & 0xffff;
462 }
463
464 static void
465 em_rep_outs(int size)
466 {
467 unsigned int ecx, edx, esi;
468
469 ecx = context.vm.regs.ecx & 0xffff;
470 edx = context.vm.regs.edx & 0xffff;
471 esi = context.vm.regs.esi & 0xffff;
472 esi += (unsigned int)context.vm.regs.ds << 4;
473
474 if (context.vm.regs.eflags & DIRECTION_FLAG)
475 {
476 if (size == 4)
477 asm volatile ("std; rep; outsl; cld"
478 : "=S" (esi), "=c" (ecx)
479 : "d" (edx), "0" (esi), "1" (ecx));
480 else if (size == 2)
481 asm volatile ("std; rep; outsw; cld"
482 : "=S" (esi), "=c" (ecx)
483 : "d" (edx), "0" (esi), "1" (ecx));
484 else
485 asm volatile ("std; rep; outsb; cld"
486 : "=S" (esi), "=c" (ecx)
487 : "d" (edx), "0" (esi), "1" (ecx));
488 }
489 else
490 {
491 if (size == 4)
492 asm volatile ("cld; rep; outsl"
493 : "=S" (esi), "=c" (ecx)
494 : "d" (edx), "0" (esi), "1" (ecx));
495 else if (size == 2)
496 asm volatile ("cld; rep; outsw"
497 : "=S" (esi), "=c" (ecx)
498 : "d" (edx), "0" (esi), "1" (ecx));
499 else
500 asm volatile ("cld; rep; outsb"
501 : "=S" (esi), "=c" (ecx)
502 : "d" (edx), "0" (esi), "1" (ecx));
503 }
504
505 esi -= (unsigned int)context.vm.regs.ds << 4;
506
507 context.vm.regs.esi &= 0xffff0000;
508 context.vm.regs.esi |= esi & 0xffff;
509
510 context.vm.regs.ecx &= 0xffff0000;
511 context.vm.regs.ecx |= ecx & 0xffff;
512 }
513
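/*
Emulate the single IN/OUT instructions: the "literal" forms take the
port number from the instruction itself, the others take it from DX.
*/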
514 static void
515 em_inbl(unsigned char literal)
516 {
517 context.vm.regs.eax = inb(literal) & 0xff;
518 }
519
520 static void
521 em_inb(void)
522 {
523 asm volatile ("inb (%w1), %b0"
524 : "=a" (context.vm.regs.eax)
525 : "d" (context.vm.regs.edx), "0" (context.vm.regs.eax));
526 }
527
528 static void
529 em_inw(void)
530 {
531 asm volatile ("inw (%w1), %w0"
532 : "=a" (context.vm.regs.eax)
533 : "d" (context.vm.regs.edx), "0" (context.vm.regs.eax));
534 }
535
536 static void
537 em_inl(void)
538 {
539 asm volatile ("inl (%w1), %0"
540 : "=a" (context.vm.regs.eax)
541 : "d" (context.vm.regs.edx));
542 }
543
544 static void
545 em_outbl(unsigned char literal)
546 {
547 outb(context.vm.regs.eax & 0xff, literal);
548 }
549
550 static void
551 em_outb(void)
552 {
553 asm volatile ("outb %b0, (%w1)"
554 : : "a" (context.vm.regs.eax),
555 "d" (context.vm.regs.edx));
556 }
557
558 static void
559 em_outw(void)
560 {
561 asm volatile ("outw %w0, (%w1)"
562 : : "a" (context.vm.regs.eax),
563 "d" (context.vm.regs.edx));
564 }
565
566 static void
567 em_outl(void)
568 {
569 asm volatile ("outl %0, (%w1)"
570 : : "a" (context.vm.regs.eax),
571 "d" (context.vm.regs.edx));
572 }
573
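/*
Decode the instruction at cs:ip that vm86() could not handle,
tracking the operand-size (0x66) and REP (0xf3) prefixes and ignoring
the rest, dispatch to the matching em_*() routine and advance eip
past the emulated instruction.  Returns 0 for opcodes that are not
emulated.
*/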
574 static int
575 emulate(void)
576 {
577 unsigned char *insn;
578 struct
579 {
580 unsigned int size : 1;
581 unsigned int rep : 1;
582 } prefix = { 0, 0 };
583 int i = 0;
584
585 insn = (unsigned char *)((unsigned int)context.vm.regs.cs << 4);
586 insn += context.vm.regs.eip;
587
588 while (1)
589 {
590 if (insn[i] == 0x66)
591 {
592 prefix.size = 1 - prefix.size;
593 i++;
594 }
595 else if (insn[i] == 0xf3)
596 {
597 prefix.rep = 1;
598 i++;
599 }
600 else if (insn[i] == 0xf0 || insn[i] == 0xf2
601 || insn[i] == 0x26 || insn[i] == 0x2e
602 || insn[i] == 0x36 || insn[i] == 0x3e
603 || insn[i] == 0x64 || insn[i] == 0x65
604 || insn[i] == 0x67)
605 {
606 /* these prefixes are just ignored */
607 i++;
608 }
609 else if (insn[i] == 0x6c)
610 {
611 if (prefix.rep)
612 em_rep_ins(1);
613 else
614 em_ins(1);
615 i++;
616 break;
617 }
618 else if (insn[i] == 0x6d)
619 {
620 if (prefix.rep)
621 {
622 if (prefix.size)
623 em_rep_ins(4);
624 else
625 em_rep_ins(2);
626 }
627 else
628 {
629 if (prefix.size)
630 em_ins(4);
631 else
632 em_ins(2);
633 }
634 i++;
635 break;
636 }
637 else if (insn[i] == 0x6e)
638 {
639 if (prefix.rep)
640 em_rep_outs(1);
641 else
642 em_outs(1);
643 i++;
644 break;
645 }
646 else if (insn[i] == 0x6f)
647 {
648 if (prefix.rep)
649 {
650 if (prefix.size)
651 em_rep_outs(4);
652 else
653 em_rep_outs(2);
654 }
655 else
656 {
657 if (prefix.size)
658 em_outs(4);
659 else
660 em_outs(2);
661 }
662 i++;
663 break;
664 }
665 else if (insn[i] == 0xe4)
666 {
667 em_inbl(insn[i + 1]);
668 i += 2;
669 break;
670 }
671 else if (insn[i] == 0xe6)
672 {
673 em_outbl(insn[i + 1]);
674 i += 2;
675 break;
676 }
677 else if (insn[i] == 0xec)
678 {
679 em_inb();
680 i++;
681 break;
682 }
683 else if (insn[i] == 0xed)
684 {
685 if (prefix.size)
686 em_inl();
687 else
688 em_inw();
689 i++;
690 break;
691 }
692 else if (insn[i] == 0xee)
693 {
694 em_outb();
695 i++;
696 break;
697 }
698 else if (insn[i] == 0xef)
699 {
700 if (prefix.size)
701 em_outl();
702 else
703 em_outw();
704
705 i++;
706 break;
707 }
708 else
709 return 0;
710 }
711
712 context.vm.regs.eip += i;
713 return 1;
714 }
715
716
717 /*
718 I don't know how to make sure I get the right vm86() from libc.
719 The one I want is syscall # 113 (vm86old() in libc 5, vm86() in glibc)
720 which should be declared as "int vm86(struct vm86_struct *);" in
721 <sys/vm86.h>.
722
723 This just does syscall 113 with inline asm, which should work
724 for both libcs (I hope).
725 */
726 #if !defined(USE_LIBC_VM86)
727 static int
728 lrmi_vm86(struct vm86_struct *vm)
729 {
730 int r;
731 #ifdef __PIC__
732 asm volatile (
733 "pushl %%ebx\n\t"
734 "movl %2, %%ebx\n\t"
735 "int $0x80\n\t"
736 "popl %%ebx"
737 : "=a" (r)
738 : "0" (113), "r" (vm));
739 #else
740 asm volatile (
741 "int $0x80"
742 : "=a" (r)
743 : "0" (113), "b" (vm));
744 #endif
745 return r;
746 }
747 #else
748 #define lrmi_vm86 vm86
749 #endif
750
751
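/*
Dump the vm86 register state and the bytes at cs:ip after a failed
vm86() call.
*/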
752 static void
753 debug_info(int vret)
754 {
755 int i;
756 unsigned char *p;
757
758 fputs("vm86() failed\n", stderr);
759 fprintf(stderr, "return = 0x%x\n", vret);
760 fprintf(stderr, "eax = 0x%08lx\n", context.vm.regs.eax);
761 fprintf(stderr, "ebx = 0x%08lx\n", context.vm.regs.ebx);
762 fprintf(stderr, "ecx = 0x%08lx\n", context.vm.regs.ecx);
763 fprintf(stderr, "edx = 0x%08lx\n", context.vm.regs.edx);
764 fprintf(stderr, "esi = 0x%08lx\n", context.vm.regs.esi);
765 fprintf(stderr, "edi = 0x%08lx\n", context.vm.regs.edi);
766 fprintf(stderr, "ebp = 0x%08lx\n", context.vm.regs.ebp);
767 fprintf(stderr, "eip = 0x%08lx\n", context.vm.regs.eip);
768 fprintf(stderr, "cs = 0x%04x\n", context.vm.regs.cs);
769 fprintf(stderr, "esp = 0x%08lx\n", context.vm.regs.esp);
770 fprintf(stderr, "ss = 0x%04x\n", context.vm.regs.ss);
771 fprintf(stderr, "ds = 0x%04x\n", context.vm.regs.ds);
772 fprintf(stderr, "es = 0x%04x\n", context.vm.regs.es);
773 fprintf(stderr, "fs = 0x%04x\n", context.vm.regs.fs);
774 fprintf(stderr, "gs = 0x%04x\n", context.vm.regs.gs);
775 fprintf(stderr, "eflags = 0x%08lx\n", context.vm.regs.eflags);
776
777 fputs("cs:ip = [ ", stderr);
778
779 p = (unsigned char *)((context.vm.regs.cs << 4) + (context.vm.regs.eip & 0xffff));
780
781 for (i = 0; i < 16; ++i)
782 fprintf(stderr, "%02x ", (unsigned int)p[i]);
783
784 fputs("]\n", stderr);
785 }
786
787
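/*
Run the vm86 context until the real-mode code reaches the
RETURN_TO_32_INT stub.  Software interrupts reported as VM86_INTx are
reflected through the real-mode vector table by pushing flags, cs and
ip and jumping through the vector; VM86_UNKNOWN faults are handed to
emulate().  Returns 1 on a clean return, 0 otherwise.
*/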
788 static int
789 run_vm86(void)
790 {
791 unsigned int vret;
792
793 while (1)
794 {
795 vret = lrmi_vm86(&context.vm);
796
797 if (VM86_TYPE(vret) == VM86_INTx)
798 {
799 unsigned int v = VM86_ARG(vret);
800
801 if (v == RETURN_TO_32_INT)
802 return 1;
803
804 pushw(context.vm.regs.eflags);
805 pushw(context.vm.regs.cs);
806 pushw(context.vm.regs.eip);
807
808 context.vm.regs.cs = get_int_seg(v);
809 context.vm.regs.eip = get_int_off(v);
810 context.vm.regs.eflags &= ~(VIF_MASK | TF_MASK);
811
812 continue;
813 }
814
815 if (VM86_TYPE(vret) != VM86_UNKNOWN)
816 break;
817
818 if (!emulate())
819 break;
820 }
821
822 #ifdef ORIGINAL_LRMI_CODE_THAT_GOT_IFDEFED_OUT
823 debug_info(vret);
824 #endif
825 return 0;
826 }
827
828
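/*
Call a real-mode far procedure at r->cs:r->ip.  The default stack is
used unless the caller supplies ss:sp, and the address of the
"int RETURN_TO_32_INT" stub is pushed so that the routine's far
return brings control back to 32-bit mode.  The resulting registers
are copied back into *r.
*/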
829 int
830 LRMI_call(struct LRMI_regs *r)
831 {
832 unsigned int vret;
833
834 memset(&context.vm.regs, 0, sizeof(context.vm.regs));
835
836 set_regs(r);
837
838 context.vm.regs.cs = r->cs;
839 context.vm.regs.eip = r->ip;
840
841 if (r->ss == 0 && r->sp == 0)
842 {
843 context.vm.regs.ss = context.stack_seg;
844 context.vm.regs.esp = context.stack_off;
845 }
846 else
847 {
848 context.vm.regs.ss = r->ss;
849 context.vm.regs.esp = r->sp;
850 }
851
852 pushw(context.ret_seg);
853 pushw(context.ret_off);
854
855 vret = run_vm86();
856
857 get_regs(r);
858
859 return vret;
860 }
861
862
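/*
Simulate a real-mode software interrupt: look the vector up in the
interrupt table, refuse vectors that do not point into the ROM area
(they would most likely reference a DOS TSR that no longer exists),
push flags and the return stub, and run the handler in vm86 mode.
*/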
863 int
864 LRMI_int(int i, struct LRMI_regs *r)
865 {
866 unsigned int vret;
867 unsigned int seg, off;
868
869 seg = get_int_seg(i);
870 off = get_int_off(i);
871
872 /*
873 If the interrupt is in regular memory, it's probably
874 still pointing at a DOS TSR (which is now gone).
875 */
876 if (seg < 0xa000 || (seg << 4) + off >= 0x100000)
877 {
878 #ifdef ORIGINAL_LRMI_CODE_THAT_GOT_IFDEFED_OUT
879 fprintf(stderr, "Int 0x%x is not in rom (%04x:%04x)\n", i, seg, off);
880 #endif
881 return 0;
882 }
883
884 memset(&context.vm.regs, 0, sizeof(context.vm.regs));
885
886 set_regs(r);
887
888 context.vm.regs.cs = seg;
889 context.vm.regs.eip = off;
890
891 if (r->ss == 0 && r->sp == 0)
892 {
893 context.vm.regs.ss = context.stack_seg;
894 context.vm.regs.esp = context.stack_off;
895 }
896 else
897 {
898 context.vm.regs.ss = r->ss;
899 context.vm.regs.esp = r->sp;
900 }
901
902 pushw(DEFAULT_VM86_FLAGS);
903 pushw(context.ret_seg);
904 pushw(context.ret_off);
905
906 vret = run_vm86();
907
908 get_regs(r);
909
910 return vret;
911 }
912