vspace.cc
1// https://github.com/rbehrends/vspace
2#include "vspace.h"
3#include "kernel/mod2.h"
4#ifdef HAVE_VSPACE
5#ifdef HAVE_CPP_THREADS
6#include <thread>
7#endif
8
9#if defined(__GNUC__) && (__GNUC__<9) && !defined(__clang__)
10
11namespace vspace {
12namespace internals {
13
14size_t config[4]
15 = {METABLOCK_SIZE, MAX_PROCESS, SEGMENT_SIZE, MAX_SEGMENTS};
16
17VMem VMem::vmem_global;
18
19// offsetof() only works for POD types, so we need to construct
20// a portable version of it for metapage fields.
21
22#define metapageaddr(field) \
23 ((char *) &vmem.metapage->field - (char *) vmem.metapage)
24
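// Example: metapageaddr(process_info) is the byte offset of the process table
// inside the shared metapage; lock_process() and unlock_process() below use it
// to compute the file range handed to lock_file()/unlock_file().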
25size_t VMem::filesize() {
26 struct stat stat;
27 fstat(fd, &stat);
28 return stat.st_size;
29}
30
31Status VMem::init(int fd) {
32 this->fd = fd;
33 for (int i = 0; i < MAX_SEGMENTS; i++)
34 segments[i] = VSeg(NULL);
35 for (int i = 0; i < MAX_PROCESS; i++) {
36 int channel[2];
37 if (pipe(channel) < 0) {
38 for (int j = 0; j < i; j++) {
39 close(channels[j].fd_read);
40 close(channels[j].fd_write);
41 }
42 return Status(ErrOS);
43 }
44 channels[i].fd_read = channel[0];
45 channels[i].fd_write = channel[1];
46 }
47 lock_metapage();
48 init_metapage(filesize() == 0);
49 unlock_metapage();
50 freelist = metapage->freelist;
51 return Status(ErrNone);
52}
53
54Status VMem::init() {
55 FILE *fp = tmpfile();
56 Status result = init(fileno(fp));
57 if (!result.ok())
58 return result;
59 current_process = 0;
60 file_handle = fp;
61 metapage->process_info[0].pid = getpid();
62 return Status(ErrNone);
63}
64
65Status VMem::init(const char *path) {
66 int fd = open(path, O_RDWR | O_CREAT, 0600);
67 if (fd < 0)
68 return Status(ErrFile);
69 init(fd);
70 lock_metapage();
71 // TODO: enter process in meta table
72 unlock_metapage();
73 return Status(ErrNone);
74}
75
76void VMem::deinit() {
77 if (file_handle) {
78 fclose(file_handle);
79 file_handle = NULL;
80 } else {
81 close(fd);
82 }
83 munmap(metapage, METABLOCK_SIZE);
84 metapage = NULL;
85 current_process = -1;
86 freelist = NULL;
87 for (int i = 0; i < MAX_SEGMENTS; i++) {
88 if (segments[i].base) munmap(segments[i].base, SEGMENT_SIZE);
89 segments[i] = NULL;
90 }
91 for (int i = 0; i < MAX_PROCESS; i++) {
92 close(channels[i].fd_read);
93 close(channels[i].fd_write);
94 }
95}
96
97void *VMem::mmap_segment(int seg) {
98 lock_metapage();
99 void *map = mmap(NULL, SEGMENT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
100 METABLOCK_SIZE + seg * SEGMENT_SIZE);
101 if (map == MAP_FAILED) {
102 // This is an "impossible to proceed from here, because system state
103 // is impossible to proceed from" situation, so we abort the program.
104 perror("mmap");
105 abort();
106 }
107 unlock_metapage();
108 return map;
109}
110
111void VMem::add_segment() {
112 int seg = metapage->segment_count++;
113 ftruncate(fd, METABLOCK_SIZE + metapage->segment_count * SEGMENT_SIZE);
114 void *map_addr = mmap_segment(seg);
115 segments[seg] = VSeg(map_addr);
116 Block *top = block_ptr(seg * SEGMENT_SIZE);
117 top->next = freelist[LOG2_SEGMENT_SIZE];
118 top->prev = VADDR_NULL;
119 freelist[LOG2_SEGMENT_SIZE] = seg * SEGMENT_SIZE;
120}
121
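// FastLock hands a short critical section from process to process. With
// HAVE_CPP_THREADS, lock() spins on the atomic flag only to update the wait
// queue, which is threaded through metapage->process_info[].next; a blocked
// caller then sleeps in wait_signal() until the current owner passes the lock
// on in unlock() via send_signal().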
122void FastLock::lock() {
123#ifdef HAVE_CPP_THREADS
124 while (_lock.test_and_set()) {
125 }
126 bool empty = _owner < 0;
127 if (empty) {
128 _owner = vmem.current_process;
129 } else {
130 int p = vmem.current_process;
131 vmem.metapage->process_info[p].next = -1;
132 if (_head < 0)
133 _head = p;
134 else
135 vmem.metapage->process_info[_tail].next = p;
136 _tail = p;
137 }
138 _lock.clear();
139 if (!empty)
140 wait_signal(false);
141#else
142 lock_file(vmem.fd, _offset);
143#endif
144}
145
146void FastLock::unlock() {
147#ifdef HAVE_CPP_THREADS
148 while (_lock.test_and_set()) {
149 }
150 _owner = _head;
151 if (_owner >= 0)
152 _head = vmem.metapage->process_info[_head].next;
153 _lock.clear();
154 if (_owner >= 0)
155 send_signal(_owner, 0, false);
156#else
157 unlock_file(vmem.fd, _offset);
158#endif
159}
160
161static void lock_allocator() {
162 vmem.metapage->allocator_lock.lock();
163}
164
165static void unlock_allocator() {
166 vmem.metapage->allocator_lock.unlock();
167}
168
169static void print_freelists() {
170 for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
171 vaddr_t vaddr = vmem.freelist[i];
172 if (vaddr != VADDR_NULL) {
173 printf("%2d: %ld", i, vaddr);
174 vaddr_t prev = block_ptr(vaddr)->prev;
175 if (prev != VADDR_NULL) {
176 printf("(%ld)", prev);
177 }
178 assert(block_ptr(vaddr)->prev == VADDR_NULL);
179 for (;;) {
180 vaddr_t last_vaddr = vaddr;
181 Block *block = block_ptr(vaddr);
182 vaddr = block->next;
183 if (vaddr == VADDR_NULL)
184 break;
185 printf(" -> %ld", vaddr);
186 vaddr_t prev = block_ptr(vaddr)->prev;
187 if (prev != last_vaddr) {
188 printf("(%ld)", prev);
189 }
190 }
191 printf("\n");
192 }
193 }
194 fflush(stdout);
195}
196
197void vmem_free(vaddr_t vaddr) {
198 lock_allocator();
199 vaddr -= offsetof(Block, data);
200 vmem.ensure_is_mapped(vaddr);
201 size_t segno = vmem.segment_no(vaddr);
202 VSeg seg = vmem.segment(vaddr);
203 segaddr_t addr = vmem.segaddr(vaddr);
204 int level = seg.block_ptr(addr)->level();
205 assert(!seg.is_free(addr));
206 while (level < LOG2_SEGMENT_SIZE) {
207 segaddr_t buddy = find_buddy(addr, level);
208 Block *block = seg.block_ptr(buddy);
209 // is buddy free and at the same level?
210 if (!block->is_free() || block->level() != level)
211 break;
212 // remove buddy from freelist.
213 Block *prev = vmem.block_ptr(block->prev);
214 Block *next = vmem.block_ptr(block->next);
215 block->data[0] = level;
216 if (prev) {
217 assert(prev->next == vmem.vaddr(segno, buddy));
218 prev->next = block->next;
219 } else {
220 // head of freelist.
221 assert(vmem.freelist[level] == vmem.vaddr(segno, buddy));
222 vmem.freelist[level] = block->next;
223 }
224 if (next) {
225 assert(next->prev == vmem.vaddr(segno, buddy));
226 next->prev = block->prev;
227 }
228 // coalesce block with buddy
229 level++;
230 if (buddy < addr)
231 addr = buddy;
232 }
233 // Add coalesced block to free list
234 Block *block = seg.block_ptr(addr);
235 block->prev = VADDR_NULL;
236 block->next = vmem.freelist[level];
237 block->mark_as_free(level);
238 vaddr_t blockaddr = vmem.vaddr(segno, addr);
239 if (block->next != VADDR_NULL)
240 vmem.block_ptr(block->next)->prev = blockaddr;
241 vmem.freelist[level] = blockaddr;
242 unlock_allocator();
243}
244
245vaddr_t vmem_alloc(size_t size) {
246 lock_allocator();
247 size_t alloc_size = size + offsetof(Block, data);
248 int level = find_level(alloc_size);
249 int flevel = level;
250 while (flevel < LOG2_SEGMENT_SIZE && vmem.freelist[flevel] == VADDR_NULL)
251 flevel++;
252 if (vmem.freelist[flevel] == VADDR_NULL) {
253 vmem.add_segment();
254 }
256 while (flevel > level) {
257 // get and split a block
258 vaddr_t blockaddr = vmem.freelist[flevel];
259 assert((blockaddr & ((1 << flevel) - 1)) == 0);
260 Block *block = vmem.block_ptr(blockaddr);
261 vmem.freelist[flevel] = block->next;
262 if (vmem.freelist[flevel] != VADDR_NULL)
263 vmem.block_ptr(vmem.freelist[flevel])->prev = VADDR_NULL;
264 vaddr_t blockaddr2 = blockaddr + (1 << (flevel - 1));
265 Block *block2 = vmem.block_ptr(blockaddr2);
266 flevel--;
267 block2->next = vmem.freelist[flevel];
268 block2->prev = blockaddr;
269 block->next = blockaddr2;
270 block->prev = VADDR_NULL;
271 // block->prev == VADDR_NULL already.
272 vmem.freelist[flevel] = blockaddr;
273 }
275 Block *block = vmem.block_ptr(vmem.freelist[level]);
276 vaddr_t vaddr = vmem.freelist[level];
277 vaddr_t result = vaddr + offsetof(Block, data);
278 vmem.freelist[level] = block->next;
279 if (block->next != VADDR_NULL)
280 vmem.block_ptr(block->next)->prev = VADDR_NULL;
281 block->mark_as_allocated(vaddr, level);
282 unlock_allocator();
283 memset(block->data, 0, size);
284 return result;
285}
286
287void init_flock_struct(
288 struct flock &lock_info, size_t offset, size_t len, bool lock) {
289 lock_info.l_start = offset;
290 lock_info.l_len = len;
291 lock_info.l_pid = 0;
292 lock_info.l_type = lock ? F_WRLCK : F_UNLCK;
293 lock_info.l_whence = SEEK_SET;
294}
295
296void lock_file(int fd, size_t offset, size_t len) {
297 struct flock lock_info;
298 init_flock_struct(lock_info, offset, len, true);
299 fcntl(fd, F_SETLKW, &lock_info);
300}
301
302void unlock_file(int fd, size_t offset, size_t len) {
303 struct flock lock_info;
304 init_flock_struct(lock_info, offset, len, false);
305 fcntl(fd, F_SETLKW, &lock_info);
306}
307
308void lock_metapage() {
309 lock_file(vmem.fd, 0);
310}
311
312void unlock_metapage() {
313 unlock_file(vmem.fd, 0);
314}
315
316void init_metapage(bool create) {
317 if (create)
318 ftruncate(vmem.fd, METABLOCK_SIZE);
319 vmem.metapage = (MetaPage *) mmap(
320 NULL, METABLOCK_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, vmem.fd, 0);
321 if (create) {
322 memcpy(vmem.metapage->config_header, config, sizeof(config));
323 for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
324 vmem.metapage->freelist[i] = VADDR_NULL;
325 }
326 vmem.metapage->segment_count = 0;
327 vmem.metapage->allocator_lock = FastLock(metapageaddr(allocator_lock));
328 } else {
329 assert(memcmp(vmem.metapage->config_header, config, sizeof(config)) == 0);
330 }
331}
332
333static void lock_process(int processno) {
334 lock_file(vmem.fd,
335 metapageaddr(process_info)
336 + sizeof(ProcessInfo) * vmem.current_process);
337}
338
339static void unlock_process(int processno) {
340 unlock_file(vmem.fd,
341 metapageaddr(process_info)
342 + sizeof(ProcessInfo) * vmem.current_process);
343}
344
345static ProcessInfo &process_info(int processno) {
346 return vmem.metapage->process_info[processno];
347}
348
349bool send_signal(int processno, ipc_signal_t sig, bool lock) {
350 if (lock)
351 lock_process(processno);
352 if (process_info(processno).sigstate != Waiting) {
353 unlock_process(processno);
354 return false;
355 }
356 if (processno == vmem.current_process) {
357 process_info(processno).sigstate = Accepted;
358 process_info(processno).signal = sig;
359 } else {
360 process_info(processno).sigstate = Pending;
361 process_info(processno).signal = sig;
362 int fd = vmem.channels[processno].fd_write;
363 char buf[1] = { 0 };
364 while (write(fd, buf, 1) != 1) {
365 }
366 }
367 if (lock)
368 unlock_process(processno);
369 return true;
370}
371
372ipc_signal_t check_signal(bool resume, bool lock) {
373 ipc_signal_t result;
374 if (lock)
375 lock_process(vmem.current_process);
376 SignalState sigstate = process_info(vmem.current_process).sigstate;
377 switch (sigstate) {
378 case Waiting:
379 case Pending: {
380 int fd = vmem.channels[vmem.current_process].fd_read;
381 char buf[1];
382 if (lock && sigstate == Waiting) {
383 unlock_process(vmem.current_process);
384 while (read(fd, buf, 1) != 1) {
385 }
386 lock_process(vmem.current_process);
387 } else {
388 while (read(fd, buf, 1) != 1) {
389 }
390 }
391 result = process_info(vmem.current_process).signal;
392 process_info(vmem.current_process).sigstate
393 = resume ? Waiting : Accepted;
394 if (lock)
395 unlock_process(vmem.current_process);
396 break;
397 }
398 case Accepted:
399 result = process_info(vmem.current_process).signal;
400 if (resume)
401 process_info(vmem.current_process).sigstate = Waiting;
402 if (lock)
403 unlock_process(vmem.current_process);
404 break;
405 }
406 return result;
407}
408
409void accept_signals() {
410 lock_process(vmem.current_process);
411 process_info(vmem.current_process).sigstate = Waiting;
412 unlock_process(vmem.current_process);
413}
414
415ipc_signal_t wait_signal(bool lock) {
416 return check_signal(true, lock);
417}
418
419} // namespace internals
420
421pid_t fork_process() {
422 using namespace internals;
423 lock_metapage();
424 for (int p = 0; p < MAX_PROCESS; p++) {
425 if (vmem.metapage->process_info[p].pid == 0) {
426 pid_t pid = fork();
427 if (pid < 0) {
428 // error
429 return -1;
430 } else if (pid == 0) {
431 // child process
432 int parent = vmem.current_process;
433 vmem.current_process = p;
434 lock_metapage();
435 vmem.metapage->process_info[p].pid = getpid();
436 unlock_metapage();
437 send_signal(parent);
438 } else {
439 // parent process
440 unlock_metapage();
441 wait_signal();
442 // child has unlocked metapage, so we don't need to.
443 }
444 return pid;
445 }
446 }
447 unlock_metapage();
448 return -1;
449}
450
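// Semaphore bookkeeping: _waiting/_signals form a ring buffer of blocked
// processes between _head and _tail. post() either bumps _value or, if a
// process is queued, skips the increment and forwards the count directly to
// that process with send_signal().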
451void Semaphore::post() {
452 int wakeup = -1;
453 internals::ipc_signal_t sig = 0;
454 _lock.lock();
455 if (_head == _tail) {
456 _value++;
457 } else {
458 // don't increment value, as we'll pass that on to the next process.
459 wakeup = _waiting[_head];
460 sig = _signals[_head];
461 next(_head);
462 }
463 _lock.unlock();
464 if (wakeup >= 0) {
465 internals::send_signal(wakeup, sig);
466 }
467}
468
469bool Semaphore::try_wait() {
470 bool result = false;
471 _lock.lock();
472 if (_value > 0) {
473 _value--;
474 result = true;
475 }
476 _lock.unlock();
477 return result;
478}
479
480void Semaphore::wait() {
481 _lock.lock();
482 if (_value > 0) {
483 _value--;
484 _lock.unlock();
485 return;
486 }
487 _waiting[_tail] = internals::vmem.current_process;
488 _signals[_tail] = 0;
489 next(_tail);
490 _lock.unlock();
491 internals::wait_signal();
492}
493
494bool Semaphore::start_wait(internals::ipc_signal_t sig) {
495 _lock.lock();
496 if (_value > 0) {
497 if (internals::send_signal(internals::vmem.current_process, sig))
498 _value--;
499 _lock.unlock();
500 return false;
501 }
502 _waiting[_tail] = internals::vmem.current_process;
503 _signals[_tail] = sig;
504 next(_tail);
505 _lock.unlock();
506 return true;
507}
508
509bool Semaphore::stop_wait() {
510 bool result = false;
511 _lock.lock();
512 for (int i = _head; i != _tail; next(i)) {
513 if (_waiting[i] == internals::vmem.current_process) {
514 int last = i;
515 next(i);
516 while (i != _tail) {
517 _waiting[last] = _waiting[i];
518 _signals[last] = _signals[i];
519 last = i;
520 next(i);
521 }
522 _tail = last;
523 result = true;
524 break;
525 }
526 }
527 _lock.unlock();
528 return result;
529}
530
531void EventSet::add(Event *event) {
532 event->_next = NULL;
533 if (_head == NULL) {
534 _head = _tail = event;
535 } else {
536 _tail->_next = event;
537 _tail = event;
538 }
539}
540
541int EventSet::wait() {
542 size_t n = 0;
543 for (Event *event = _head; event; event = event->_next) {
544 if (!event->start_listen((int) (n++))) {
545 break;
546 }
547 }
548 internals::ipc_signal_t result = internals::check_signal();
549 for (Event *event = _head; event; event = event->_next) {
550 event->stop_listen();
551 }
552 internals::accept_signals();
553 return (int) result;
554}
555
556} // namespace vspace
557#else
558#include <cstddef>
559#include <cstdlib>
560#include <unistd.h>
561#include <sys/mman.h>
562#include <sys/stat.h>
563
564
565namespace vspace {
566namespace internals {
567
568size_t config[4]
569 = {METABLOCK_SIZE, MAX_PROCESS, SEGMENT_SIZE, MAX_SEGMENTS};
570
571VMem VMem::vmem_global;
572
573// offsetof() only works for POD types, so we need to construct
574// a portable version of it for metapage fields.
575
576#define metapageaddr(field) \
577 ((char *) &vmem.metapage->field - (char *) vmem.metapage)
578
579size_t VMem::filesize() {
580 struct stat stat;
581 fstat(fd, &stat);
582 return stat.st_size;
583}
584
585Status VMem::init(int fd) {
586 this->fd = fd;
587 for (int i = 0; i < MAX_SEGMENTS; i++)
588 segments[i] = VSeg(NULL);
589 for (int i = 0; i < MAX_PROCESS; i++) {
590 int channel[2];
591 if (pipe(channel) < 0) {
592 for (int j = 0; j < i; j++) {
593 close(channels[j].fd_read);
594 close(channels[j].fd_write);
595 }
596 return Status(ErrOS);
597 }
598 channels[i].fd_read = channel[0];
599 channels[i].fd_write = channel[1];
600 }
601 lock_metapage();
602 init_metapage(filesize() == 0);
603 unlock_metapage();
604 freelist = metapage->freelist;
605 return Status(ErrNone);
606}
607
608Status VMem::init() {
609 FILE *fp = tmpfile();
610 Status result = init(fileno(fp));
611 if (!result.ok())
612 return result;
613 current_process = 0;
614 file_handle = fp;
615 metapage->process_info[0].pid = getpid();
616 return Status(ErrNone);
617}
618
619Status VMem::init(const char *path) {
620 int fd = open(path, O_RDWR | O_CREAT, 0600);
621 if (fd < 0)
622 return Status(ErrFile);
623 init(fd);
624 lock_metapage();
625 // TODO: enter process in meta table
626 unlock_metapage();
627 return Status(ErrNone);
628}
629
630void VMem::deinit() {
631 if (file_handle) {
632 fclose(file_handle);
633 file_handle = NULL;
634 } else {
635 close(fd);
636 }
637 munmap(metapage, METABLOCK_SIZE);
638 metapage = NULL;
639 current_process = -1;
640 freelist = NULL;
641 for (int i = 0; i < MAX_SEGMENTS; i++) {
642 if (!segments[i].is_free())
643 munmap(segments[i].base, SEGMENT_SIZE);
644 segments[i] = VSeg(NULL);
645 }
646 for (int i = 0; i < MAX_PROCESS; i++) {
647 close(channels[i].fd_read);
648 close(channels[i].fd_write);
649 }
650}
651
652void *VMem::mmap_segment(int seg) {
653 lock_metapage();
654 void *map = mmap(NULL, SEGMENT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
655 METABLOCK_SIZE + seg * SEGMENT_SIZE);
656 if (map == MAP_FAILED) {
657 // This is an "impossible to proceed from here, because system state
658 // is impossible to proceed from" situation, so we abort the program.
659 perror("mmap");
660 abort();
661 }
662 unlock_metapage();
663 return map;
664}
665
666void VMem::add_segment() {
667 int seg = metapage->segment_count++;
668 ftruncate(fd, METABLOCK_SIZE + metapage->segment_count * SEGMENT_SIZE);
669 void *map_addr = mmap_segment(seg);
670 segments[seg] = VSeg(map_addr);
671 Block *top = block_ptr(seg * SEGMENT_SIZE);
672 top->next = freelist[LOG2_SEGMENT_SIZE];
673 top->prev = VADDR_NULL;
674 freelist[LOG2_SEGMENT_SIZE] = seg * SEGMENT_SIZE;
675}
676
677void FastLock::lock() {
678#ifdef HAVE_CPP_THREADS
679 while (_lock.test_and_set()) {
680 }
681 bool empty = _owner < 0;
682 if (empty) {
683 _owner = vmem.current_process;
684 } else {
685 int p = vmem.current_process;
686 vmem.metapage->process_info[p].next = -1;
687 if (_head < 0)
688 _head = p;
689 else
690 vmem.metapage->process_info[_tail].next = p;
691 _tail = p;
692 }
693 _lock.clear();
694 if (!empty)
695 wait_signal(false);
696#else
697 lock_file(vmem.fd, _offset);
698#endif
699}
700
701void FastLock::unlock() {
702#ifdef HAVE_CPP_THREADS
703 while (_lock.test_and_set()) {
704 }
705 _owner = _head;
706 if (_owner >= 0)
707 _head = vmem.metapage->process_info[_head].next;
708 _lock.clear();
709 if (_owner >= 0)
710 send_signal(_owner, 0, false);
711#else
712 unlock_file(vmem.fd, _offset);
713#endif
714}
715
716static void lock_allocator() {
717 vmem.metapage->allocator_lock.lock();
718}
719
720static void unlock_allocator() {
721 vmem.metapage->allocator_lock.unlock();
722}
723
724static void print_freelists() {
725 for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
726 vaddr_t vaddr = vmem.freelist[i];
727 if (vaddr != VADDR_NULL) {
728 std::printf("%2d: %zu", i, vaddr);
729 vaddr_t prev = block_ptr(vaddr)->prev;
730 if (prev != VADDR_NULL) {
731 std::printf("(%zu)", prev);
732 }
733 assert(block_ptr(vaddr)->prev == VADDR_NULL);
734 for (;;) {
735 vaddr_t last_vaddr = vaddr;
736 Block *block = block_ptr(vaddr);
737 vaddr = block->next;
738 if (vaddr == VADDR_NULL)
739 break;
740 std::printf(" -> %zu", vaddr);
741 vaddr_t prev = block_ptr(vaddr)->prev;
742 if (prev != last_vaddr) {
743 std::printf("(%zu)", prev);
744 }
745 }
746 std::printf("\n");
747 }
748 }
749 std::fflush(stdout);
750}
751
752void vmem_free(vaddr_t vaddr) {
753 lock_allocator();
754 vaddr -= offsetof(Block, data);
755 vmem.ensure_is_mapped(vaddr);
756 size_t segno = vmem.segment_no(vaddr);
757 VSeg seg = vmem.segment(vaddr);
758 segaddr_t addr = vmem.segaddr(vaddr);
759 int level = seg.block_ptr(addr)->level();
760 assert(!seg.is_free(addr));
761 while (level < LOG2_SEGMENT_SIZE) {
762 segaddr_t buddy = find_buddy(addr, level);
763 Block *block = seg.block_ptr(buddy);
764 // is buddy free and at the same level?
765 if (!block->is_free() || block->level() != level)
766 break;
767 // remove buddy from freelist.
768 Block *prev = vmem.block_ptr(block->prev);
769 Block *next = vmem.block_ptr(block->next);
770 block->data[0] = level;
771 if (prev) {
772 assert(prev->next == vmem.vaddr(segno, buddy));
773 prev->next = block->next;
774 } else {
775 // head of freelist.
776 assert(vmem.freelist[level] == vmem.vaddr(segno, buddy));
777 vmem.freelist[level] = block->next;
778 }
779 if (next) {
780 assert(next->prev == vmem.vaddr(segno, buddy));
781 next->prev = block->prev;
782 }
783 // coalesce block with buddy
784 level++;
785 if (buddy < addr)
786 addr = buddy;
787 }
788 // Add coalesced block to free list
789 Block *block = seg.block_ptr(addr);
790 block->prev = VADDR_NULL;
791 block->next = vmem.freelist[level];
792 block->mark_as_free(level);
793 vaddr_t blockaddr = vmem.vaddr(segno, addr);
794 if (block->next != VADDR_NULL)
795 vmem.block_ptr(block->next)->prev = blockaddr;
796 vmem.freelist[level] = blockaddr;
797 unlock_allocator();
798}
799
800vaddr_t vmem_alloc(size_t size) {
801 lock_allocator();
802 size_t alloc_size = size + offsetof(Block, data);
803 int level = find_level(alloc_size);
804 int flevel = level;
805 while (flevel < LOG2_SEGMENT_SIZE && vmem.freelist[flevel] == VADDR_NULL)
806 flevel++;
807 if (vmem.freelist[flevel] == VADDR_NULL) {
808 vmem.add_segment();
809 }
811 while (flevel > level) {
812 // get and split a block
813 vaddr_t blockaddr = vmem.freelist[flevel];
814 assert((blockaddr & ((1 << flevel) - 1)) == 0);
815 Block *block = vmem.block_ptr(blockaddr);
816 vmem.freelist[flevel] = block->next;
817 if (vmem.freelist[flevel] != VADDR_NULL)
818 vmem.block_ptr(vmem.freelist[flevel])->prev = VADDR_NULL;
819 vaddr_t blockaddr2 = blockaddr + (1 << (flevel - 1));
820 Block *block2 = vmem.block_ptr(blockaddr2);
821 flevel--;
822 block2->next = vmem.freelist[flevel];
823 block2->prev = blockaddr;
824 block->next = blockaddr2;
825 block->prev = VADDR_NULL;
826 // block->prev == VADDR_NULL already.
827 vmem.freelist[flevel] = blockaddr;
828 }
830 Block *block = vmem.block_ptr(vmem.freelist[level]);
831 vaddr_t vaddr = vmem.freelist[level];
832 vaddr_t result = vaddr + offsetof(Block, data);
833 vmem.freelist[level] = block->next;
834 if (block->next != VADDR_NULL)
835 vmem.block_ptr(block->next)->prev = VADDR_NULL;
836 block->mark_as_allocated(vaddr, level);
837 unlock_allocator();
838 memset(block->data, 0, size);
839 return result;
840}
841
842void init_flock_struct(
843 struct flock &lock_info, size_t offset, size_t len, bool lock) {
844 lock_info.l_start = offset;
845 lock_info.l_len = len;
846 lock_info.l_pid = 0;
847 lock_info.l_type = lock ? F_WRLCK : F_UNLCK;
848 lock_info.l_whence = SEEK_SET;
849}
850
851void lock_file(int fd, size_t offset, size_t len) {
852 struct flock lock_info;
853 init_flock_struct(lock_info, offset, len, true);
854 fcntl(fd, F_SETLKW, &lock_info);
855}
856
857void unlock_file(int fd, size_t offset, size_t len) {
858 struct flock lock_info;
859 init_flock_struct(lock_info, offset, len, false);
860 fcntl(fd, F_SETLKW, &lock_info);
861}
862
863void lock_metapage() {
864 lock_file(vmem.fd, 0);
865}
866
867void unlock_metapage() {
868 unlock_file(vmem.fd, 0);
869}
870
871void init_metapage(bool create) {
872 if (create)
873 ftruncate(vmem.fd, METABLOCK_SIZE);
874 vmem.metapage = (MetaPage *) mmap(
875 NULL, METABLOCK_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, vmem.fd, 0);
876 if (create) {
877 std::memcpy(vmem.metapage->config_header, config, sizeof(config));
878 for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
879 vmem.metapage->freelist[i] = VADDR_NULL;
880 }
881 vmem.metapage->segment_count = 0;
882 vmem.metapage->allocator_lock = FastLock(metapageaddr(allocator_lock));
883 } else {
884 assert(std::memcmp(vmem.metapage->config_header, config,
885 sizeof(config)) == 0);
886 }
887}
888
889static void lock_process(int processno) {
890 lock_file(vmem.fd,
891 metapageaddr(process_info)
892 + sizeof(ProcessInfo) * vmem.current_process);
893}
894
895static void unlock_process(int processno) {
896 unlock_file(vmem.fd,
897 metapageaddr(process_info)
898 + sizeof(ProcessInfo) * vmem.current_process);
899}
900
901static ProcessInfo &process_info(int processno) {
902 return vmem.metapage->process_info[processno];
903}
904
905bool send_signal(int processno, ipc_signal_t sig, bool lock) {
906 if (lock)
907 lock_process(processno);
908 if (process_info(processno).sigstate != Waiting) {
909 unlock_process(processno);
910 return false;
911 }
912 if (processno == vmem.current_process) {
913 process_info(processno).sigstate = Accepted;
914 process_info(processno).signal = sig;
915 } else {
916 process_info(processno).sigstate = Pending;
917 process_info(processno).signal = sig;
918 int fd = vmem.channels[processno].fd_write;
919 char buf[1] = { 0 };
920 while (write(fd, buf, 1) != 1) {
921 }
922 }
923 if (lock)
924 unlock_process(processno);
925 return true;
926}
927
928ipc_signal_t check_signal(bool resume, bool lock) {
929 ipc_signal_t result;
930 if (lock)
931 lock_process(vmem.current_process);
932 SignalState sigstate = process_info(vmem.current_process).sigstate;
933 switch (sigstate) {
934 case Waiting:
935 case Pending: {
936 int fd = vmem.channels[vmem.current_process].fd_read;
937 char buf[1];
938 if (lock && sigstate == Waiting) {
939 unlock_process(vmem.current_process);
940 while (read(fd, buf, 1) != 1) {
941 }
942 lock_process(vmem.current_process);
943 } else {
944 while (read(fd, buf, 1) != 1) {
945 }
946 }
947 result = process_info(vmem.current_process).signal;
948 process_info(vmem.current_process).sigstate
949 = resume ? Waiting : Accepted;
950 if (lock)
951 unlock_process(vmem.current_process);
952 break;
953 }
954 case Accepted:
955 result = process_info(vmem.current_process).signal;
956 if (resume)
957 process_info(vmem.current_process).sigstate = Waiting;
958 if (lock)
959 unlock_process(vmem.current_process);
960 break;
961 }
962 return result;
963}
964
965void accept_signals() {
966 lock_process(vmem.current_process);
967 process_info(vmem.current_process).sigstate = Waiting;
968 unlock_process(vmem.current_process);
969}
970
971ipc_signal_t wait_signal(bool lock) {
972 return check_signal(true, lock);
973}
974
975} // namespace internals
976
977pid_t fork_process() {
978 using namespace internals;
979 lock_metapage();
980 for (int p = 0; p < MAX_PROCESS; p++) {
981 if (vmem.metapage->process_info[p].pid == 0) {
982 pid_t pid = fork();
983 if (pid < 0) {
984 // error
985 return -1;
986 } else if (pid == 0) {
987 // child process
988 int parent = vmem.current_process;
989 vmem.current_process = p;
990 lock_metapage();
991 vmem.metapage->process_info[p].pid = getpid();
992 unlock_metapage();
993 send_signal(parent);
994 } else {
995 // parent process
996 unlock_metapage();
997 wait_signal();
998 // child has unlocked metapage, so we don't need to.
999 }
1000 return pid;
1001 }
1002 }
1003 unlock_metapage();
1004 return -1;
1005}
1006
1007void Semaphore::post() {
1008 int wakeup = -1;
1009 internals::ipc_signal_t sig = 0;
1010 _lock.lock();
1011 if (_head == _tail) {
1012 _value++;
1013 } else {
1014 // don't increment value, as we'll pass that on to the next process.
1015 wakeup = _waiting[_head];
1016 sig = _signals[_head];
1017 next(_head);
1018 }
1019 _lock.unlock();
1020 if (wakeup >= 0) {
1021 internals::send_signal(wakeup, sig);
1022 }
1023}
1024
1025bool Semaphore::try_wait() {
1026 bool result = false;
1027 _lock.lock();
1028 if (_value > 0) {
1029 _value--;
1030 result = true;
1031 }
1032 _lock.unlock();
1033 return result;
1034}
1035
1036void Semaphore::wait() {
1037 _lock.lock();
1038 if (_value > 0) {
1039 _value--;
1040 _lock.unlock();
1041 return;
1042 }
1043 _waiting[_tail] = internals::vmem.current_process;
1044 _signals[_tail] = 0;
1045 next(_tail);
1046 _lock.unlock();
1047 internals::wait_signal();
1048}
1049
1050bool Semaphore::start_wait(internals::ipc_signal_t sig) {
1051 _lock.lock();
1052 if (_value > 0) {
1053 if (internals::send_signal(internals::vmem.current_process, sig))
1054 _value--;
1055 _lock.unlock();
1056 return false;
1057 }
1058 _waiting[_tail] = internals::vmem.current_process;
1059 _signals[_tail] = sig;
1060 next(_tail);
1061 _lock.unlock();
1062 return true;
1063}
1064
1065bool Semaphore::stop_wait() {
1066 bool result = false;
1067 _lock.lock();
1068 for (int i = _head; i != _tail; next(i)) {
1069 if (_waiting[i] == internals::vmem.current_process) {
1070 int last = i;
1071 next(i);
1072 while (i != _tail) {
1073 _waiting[last] = _waiting[i];
1074 _signals[last] = _signals[i];
1075 last = i;
1076 next(i);
1077 }
1078 _tail = last;
1079 result = true;
1080 break;
1081 }
1082 }
1083 _lock.unlock();
1084 return result;
1085}
1086
1087void EventSet::add(Event *event) {
1088 event->_next = NULL;
1089 if (_head == NULL) {
1090 _head = _tail = event;
1091 } else {
1092 _tail->_next = event;
1093 _tail = event;
1094 }
1095}
1096
1097int EventSet::wait() {
1098 size_t n = 0;
1099 for (Event *event = _head; event; event = event->_next) {
1100 if (!event->start_listen((int) (n++))) {
1101 break;
1102 }
1103 }
1104 internals::ipc_signal_t result = internals::check_signal();
1105 for (Event *event = _head; event; event = event->_next) {
1106 event->stop_listen();
1107 }
1108 internals::accept_signals();
1109 return (int) result;
1110}
1111
1112} // namespace vspace
1113#endif
1114#endif
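
To show how the pieces in this file fit together, here is a minimal, hypothetical driver (not part of vspace.cc). It assumes that Status, vaddr_t, vmem, vmem_alloc and vmem_free are reachable through vspace::internals, as the listing suggests, and it omits error handling and fork_process().

#include "vspace.h"

using namespace vspace;

int main() {
  // Back the shared region with an anonymous tmpfile() and map the metapage.
  internals::Status s = internals::vmem.init();
  if (!s.ok())
    return 1;
  // Allocate a zero-initialized block from the buddy allocator ...
  internals::vaddr_t a = internals::vmem_alloc(128);
  // ... and return it; vmem_free() coalesces it with any free buddies.
  internals::vmem_free(a);
  internals::vmem.deinit();
  return 0;
}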