vspace.cc
// https://github.com/rbehrends/vspace
#include "vspace.h"
#include "kernel/mod2.h"
#ifdef HAVE_VSPACE
#ifdef HAVE_CPP_THREADS
#include <thread>
#endif
#include <cstddef>

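// The implementation below exists in two nearly identical copies: the first
// one is compiled for GCC releases before version 9 and calls the C library
// functions unqualified, while the second copy (in the #else branch further
// down) qualifies the <cstdio>/<cstring> calls with std::.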
#if defined(__GNUC__) && (__GNUC__ < 9) && !defined(__clang__)

namespace vspace {
namespace internals {

size_t config[4]
    = {METABLOCK_SIZE, MAX_PROCESS, SEGMENT_SIZE, MAX_SEGMENTS};

VMem VMem::vmem_global;

// offsetof() only works for POD types, so we need to construct
// a portable version of it for metapage fields.

#define metapageaddr(field) \
  ((char *) &vmem.metapage->field - (char *) vmem.metapage)
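// For example, metapageaddr(process_info) yields the byte offset of the
// process_info[] table within the metapage, which is also its offset within
// the backing file, so byte-range locks on that file (see lock_file() and
// lock_process() below) can target individual metapage fields.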

size_t VMem::filesize() {
  struct stat stat;
  fstat(fd, &stat);
  return stat.st_size;
}

Status VMem::init(int fd) {
  this->fd = fd;
  for (int i = 0; i < MAX_SEGMENTS; i++)
    segments[i] = VSeg(NULL);
  for (int i = 0; i < MAX_PROCESS; i++) {
    int channel[2];
    if (pipe(channel) < 0) {
      for (int j = 0; j < i; j++) {
        close(channels[j].fd_read);
        close(channels[j].fd_write);
      }
      return Status(ErrOS);
    }
    channels[i].fd_read = channel[0];
    channels[i].fd_write = channel[1];
  }
  lock_metapage();
  init_metapage(filesize() == 0);
  unlock_metapage();
  freelist = metapage->freelist;
  return Status(ErrNone);
}

Status VMem::init() {
  FILE *fp = tmpfile();
  Status result = init(fileno(fp));
  if (!result.ok())
    return result;
  current_process = 0;
  file_handle = fp;
  metapage->process_info[0].pid = getpid();
  return Status(ErrNone);
}

Status VMem::init(const char *path) {
  int fd = open(path, O_RDWR | O_CREAT, 0600);
  if (fd < 0)
    return Status(ErrFile);
  init(fd);
  lock_metapage();
  // TODO: enter process in meta table
  unlock_metapage();
  return Status(ErrNone);
}

void VMem::deinit() {
  if (file_handle) {
    fclose(file_handle);
    file_handle = NULL;
  } else {
    close(fd);
  }
  munmap(metapage, METABLOCK_SIZE);
  metapage = NULL;
  current_process = -1;
  freelist = NULL;
  for (int i = 0; i < MAX_SEGMENTS; i++) {
    if (segments[i].base) munmap(segments[i].base, SEGMENT_SIZE);
    segments[i] = NULL;
  }
  for (int i = 0; i < MAX_PROCESS; i++) {
    close(channels[i].fd_read);
    close(channels[i].fd_write);
  }
}

void *VMem::mmap_segment(int seg) {
  lock_metapage();
  void *map = mmap(NULL, SEGMENT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
      METABLOCK_SIZE + seg * SEGMENT_SIZE);
  if (map == MAP_FAILED) {
    // This is an "impossible to proceed from here, because system state
    // is impossible to proceed from" situation, so we abort the program.
    perror("mmap");
    abort();
  }
  unlock_metapage();
  return map;
}

void VMem::add_segment() {
  int seg = metapage->segment_count++;
  ftruncate(fd, METABLOCK_SIZE + metapage->segment_count * SEGMENT_SIZE);
  void *map_addr = mmap_segment(seg);
  segments[seg] = VSeg(map_addr);
  Block *top = block_ptr(seg * SEGMENT_SIZE);
  top->next = freelist[LOG2_SEGMENT_SIZE];
  top->prev = VADDR_NULL;
  freelist[LOG2_SEGMENT_SIZE] = seg * SEGMENT_SIZE;
}

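// FastLock: when C++11 atomics are available, contention is handled with a
// spin on an atomic_flag plus an explicit waiter queue threaded through
// process_info[].next; the current holder hands the lock to the first queued
// process by sending it a signal. Without HAVE_CPP_THREADS the fallback is
// the file-lock based path in the #else branches below.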
void FastLock::lock() {
#ifdef HAVE_CPP_THREADS
  while (_lock.test_and_set()) {
  }
  bool empty = _owner < 0;
  if (empty) {
    _owner = vmem.current_process;
  } else {
    int p = vmem.current_process;
    vmem.metapage->process_info[p].next = -1;
    if (_head < 0)
      _head = p;
    else
      vmem.metapage->process_info[_tail].next = p;
    _tail = p;
  }
  _lock.clear();
  if (!empty)
    wait_signal(false);
#else
  lock_file(vmem.fd, _offset);
#endif
}

void FastLock::unlock() {
#ifdef HAVE_CPP_THREADS
  while (_lock.test_and_set()) {
  }
  _owner = _head;
  if (_owner >= 0)
    _head = vmem.metapage->process_info[_head].next;
  _lock.clear();
  if (_owner >= 0)
    send_signal(_owner, 0, false);
#else
  unlock_file(vmem.fd, _offset);
#endif
}

static void lock_allocator() {
  lock_file(vmem.fd, metapageaddr(freelist));
}

static void unlock_allocator() {
  unlock_file(vmem.fd, metapageaddr(freelist));
}

static void print_freelists() {
  for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
    vaddr_t vaddr = vmem.freelist[i];
    if (vaddr != VADDR_NULL) {
      printf("%2d: %ld", i, (long)vaddr);
      vaddr_t prev = block_ptr(vaddr)->prev;
      if (prev != VADDR_NULL) {
        printf("(%ld)", (long)prev);
      }
      assert(block_ptr(vaddr)->prev == VADDR_NULL);
      for (;;) {
        vaddr_t last_vaddr = vaddr;
        Block *block = block_ptr(vaddr);
        vaddr = block->next;
        if (vaddr == VADDR_NULL)
          break;
        printf(" -> %ld", (long)vaddr);
        vaddr_t prev = block_ptr(vaddr)->prev;
        if (prev != last_vaddr) {
          printf("(%ld)", (long)prev);
        }
      }
      printf("\n");
    }
  }
  fflush(stdout);
}

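// vmem_free(): buddy deallocation. The block is merged with its buddy as long
// as the buddy is free and of the same size class; the coalesced block is
// then pushed onto the freelist of the resulting level.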
void vmem_free(vaddr_t vaddr) {
  lock_allocator();
  vaddr -= offsetof(Block, data);
  vmem.ensure_is_mapped(vaddr);
  size_t segno = vmem.segment_no(vaddr);
  VSeg seg = vmem.segment(vaddr);
  segaddr_t addr = vmem.segaddr(vaddr);
  int level = seg.block_ptr(addr)->level();
  assert(!seg.is_free(addr));
  while (level < LOG2_SEGMENT_SIZE) {
    segaddr_t buddy = find_buddy(addr, level);
    Block *block = seg.block_ptr(buddy);
    // is buddy free and at the same level?
    if (!block->is_free() || block->level() != level)
      break;
    // remove buddy from freelist.
    Block *prev = vmem.block_ptr(block->prev);
    Block *next = vmem.block_ptr(block->next);
    block->data[0] = level;
    if (prev) {
      assert(prev->next == vmem.vaddr(segno, buddy));
      prev->next = block->next;
    } else {
      // head of freelist.
      assert(vmem.freelist[level] == vmem.vaddr(segno, buddy));
      vmem.freelist[level] = block->next;
    }
    if (next) {
      assert(next->prev == vmem.vaddr(segno, buddy));
      next->prev = block->prev;
    }
    // coalesce block with buddy
    level++;
    if (buddy < addr)
      addr = buddy;
  }
  // Add coalesced block to free list
  Block *block = seg.block_ptr(addr);
  block->prev = VADDR_NULL;
  block->next = vmem.freelist[level];
  block->mark_as_free(level);
  vaddr_t blockaddr = vmem.vaddr(segno, addr);
  if (block->next != VADDR_NULL)
    vmem.block_ptr(block->next)->prev = blockaddr;
  vmem.freelist[level] = blockaddr;
  unlock_allocator();
}

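// vmem_alloc(): buddy allocation. Find the smallest level that fits
// size + header, walk up the freelists to the first non-empty level (adding a
// fresh segment if none is available), then split blocks back down to the
// requested level.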
vaddr_t vmem_alloc(size_t size) {
  lock_allocator();
  size_t alloc_size = size + offsetof(Block, data);
  int level = find_level(alloc_size);
  int flevel = level;
  while (flevel < LOG2_SEGMENT_SIZE && vmem.freelist[flevel] == VADDR_NULL)
    flevel++;
  if (vmem.freelist[flevel] == VADDR_NULL) {
    vmem.add_segment();
  }
  while (flevel > level) {
    // get and split a block
    vaddr_t blockaddr = vmem.freelist[flevel];
    assert((blockaddr & ((1 << flevel) - 1)) == 0);
    Block *block = vmem.block_ptr(blockaddr);
    vmem.freelist[flevel] = block->next;
    if (vmem.freelist[flevel] != VADDR_NULL)
      vmem.block_ptr(vmem.freelist[flevel])->prev = VADDR_NULL;
    vaddr_t blockaddr2 = blockaddr + (1 << (flevel - 1));
    Block *block2 = vmem.block_ptr(blockaddr2);
    flevel--;
    block2->next = vmem.freelist[flevel];
    block2->prev = blockaddr;
    block->next = blockaddr2;
    block->prev = VADDR_NULL;
    // block->prev == VADDR_NULL already.
    vmem.freelist[flevel] = blockaddr;
  }
  Block *block = vmem.block_ptr(vmem.freelist[level]);
  vaddr_t vaddr = vmem.freelist[level];
  vaddr_t result = vaddr + offsetof(Block, data);
  vmem.freelist[level] = block->next;
  if (block->next != VADDR_NULL)
    vmem.block_ptr(block->next)->prev = VADDR_NULL;
  block->mark_as_allocated(vaddr, level);
  unlock_allocator();
  memset(block->data, 0, size);
  return result;
}
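// Note that vmem_alloc() hands out a vaddr_t offset whose payload starts at
// offsetof(Block, data) and is zero-filled; vmem_free() expects exactly that
// payload address back and subtracts the header offset again.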

void init_flock_struct(
    struct flock &lock_info, size_t offset, size_t len, bool lock) {
  lock_info.l_start = offset;
  lock_info.l_len = len;
  lock_info.l_pid = 0;
  lock_info.l_type = lock ? F_WRLCK : F_UNLCK;
  lock_info.l_whence = SEEK_SET;
}

void lock_file(int fd, size_t offset, size_t len) {
  struct flock lock_info;
  init_flock_struct(lock_info, offset, len, true);
  fcntl(fd, F_SETLKW, &lock_info);
}

void unlock_file(int fd, size_t offset, size_t len) {
  struct flock lock_info;
  init_flock_struct(lock_info, offset, len, false);
  fcntl(fd, F_SETLKW, &lock_info);
}

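// Cross-process synchronization works through byte-range locks on the shared
// backing file: lock_metapage() locks the start of the metapage, while
// lock_process() (below) locks only the ProcessInfo slot of a single process,
// so signalling one process does not serialize unrelated metapage updates.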
void lock_metapage() {
  lock_file(vmem.fd, 0);
}

void unlock_metapage() {
  unlock_file(vmem.fd, 0);
}

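// init_metapage() maps the metablock at the start of the backing file; on
// creation it records the configuration constants and empties all freelists,
// otherwise the stored configuration header is compared (via assert) with the
// compiled-in config[].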
void init_metapage(bool create) {
  if (create)
    ftruncate(vmem.fd, METABLOCK_SIZE);
  vmem.metapage = (MetaPage *) mmap(
      NULL, METABLOCK_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, vmem.fd, 0);
  if (create) {
    memcpy(vmem.metapage->config_header, config, sizeof(config));
    for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
      vmem.metapage->freelist[i] = VADDR_NULL;
    }
    vmem.metapage->segment_count = 0;
  } else {
    assert(memcmp(vmem.metapage->config_header, config, sizeof(config)) != 0);
  }
}

static void lock_process(int processno) {
  lock_file(vmem.fd,
      metapageaddr(process_info)
          + sizeof(ProcessInfo) * vmem.current_process);
}

static void unlock_process(int processno) {
  unlock_file(vmem.fd,
      metapageaddr(process_info)
          + sizeof(ProcessInfo) * vmem.current_process);
}

static ProcessInfo &process_info(int processno) {
  return vmem.metapage->process_info[processno];
}

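// Signalling between processes: each process owns a pipe (created in
// VMem::init(int fd)). send_signal() stores the signal value in the target's
// ProcessInfo and wakes it by writing one byte to its pipe; check_signal()
// blocks on the corresponding read end and then picks the value up from the
// metapage. sigstate tracks whether a process is Waiting, has a Pending
// signal, or has already Accepted one.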
bool send_signal(int processno, ipc_signal_t sig, bool lock) {
  if (lock)
    lock_process(processno);
  if (process_info(processno).sigstate != Waiting) {
    unlock_process(processno);
    return false;
  }
  if (processno == vmem.current_process) {
    process_info(processno).sigstate = Accepted;
    process_info(processno).signal = sig;
  } else {
    process_info(processno).sigstate = Pending;
    process_info(processno).signal = sig;
    int fd = vmem.channels[processno].fd_write;
    char buf[1] = { 0 };
    while (write(fd, buf, 1) != 1) {
    }
  }
  if (lock)
    unlock_process(processno);
  return true;
}

ipc_signal_t check_signal(bool resume, bool lock) {
  ipc_signal_t result = 0;
  if (lock)
    lock_process(vmem.current_process);
  int sigstate = process_info(vmem.current_process).sigstate;
  switch (sigstate) {
    case Waiting:
    case Pending: {
      int fd = vmem.channels[vmem.current_process].fd_read;
      char buf[1];
      if (lock && sigstate == Waiting) {
        unlock_process(vmem.current_process);
        while (read(fd, buf, 1) != 1) {
        }
        lock_process(vmem.current_process);
      } else {
        while (read(fd, buf, 1) != 1) {
        }
      }
      result = process_info(vmem.current_process).signal;
      process_info(vmem.current_process).sigstate
          = resume ? Waiting : Accepted;
      if (lock)
        unlock_process(vmem.current_process);
      break;
    }
    case Accepted:
      result = process_info(vmem.current_process).signal;
      if (resume)
        process_info(vmem.current_process).sigstate = Waiting;
      if (lock)
        unlock_process(vmem.current_process);
      break;
  }
  return result;
}

void accept_signals() {
  lock_process(vmem.current_process);
  process_info(vmem.current_process).sigstate = Waiting;
  unlock_process(vmem.current_process);
}

ipc_signal_t wait_signal(bool lock) {
  return check_signal(true, lock);
}

} // namespace internals

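// fork_process() grabs a free ProcessInfo slot under the metapage lock and
// forks; the child registers its pid in that slot and signals the parent,
// which waits for that signal before returning.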
pid_t fork_process() {
  using namespace internals;
  lock_metapage();
  for (int p = 0; p < MAX_PROCESS; p++) {
    if (vmem.metapage->process_info[p].pid == 0) {
      pid_t pid = fork();
      if (pid < 0) {
        // error
        return -1;
      } else if (pid == 0) {
        // child process
        int parent = vmem.current_process;
        vmem.current_process = p;
        vmem.metapage->process_info[p].pid = getpid();
        unlock_metapage();
        send_signal(parent);
      } else {
        // parent process
        wait_signal();
        // child has unlocked metapage, so we don't need to.
      }
      return pid;
    }
  }
  unlock_metapage();
  return -1;
}

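// Semaphore: _waiting/_signals hold the queue of blocked processes (advanced
// with next()). post() either increments the value or, if someone is queued,
// passes the "token" straight to the first waiter by signalling it, so the
// value itself never goes up while the queue is non-empty.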
void Semaphore::post() {
  int wakeup = -1;
  internals::ipc_signal_t sig = 0;
  _lock.lock();
  if (_head == _tail) {
    _value++;
  } else {
    // don't increment value, as we'll pass that on to the next process.
    wakeup = _waiting[_head];
    sig = _signals[_head];
    next(_head);
  }
  _lock.unlock();
  if (wakeup >= 0) {
    internals::send_signal(wakeup, sig);
  }
}

bool Semaphore::try_wait() {
  bool result = false;
  _lock.lock();
  if (_value > 0) {
    _value--;
    result = true;
  }
  _lock.unlock();
  return result;
}

void Semaphore::wait() {
  _lock.lock();
  if (_value > 0) {
    _value--;
    _lock.unlock();
    return;
  }
  _waiting[_tail] = internals::vmem.current_process;
  _signals[_tail] = 0;
  next(_tail);
  _lock.unlock();
  internals::wait_signal();
}

bool Semaphore::start_wait(internals::ipc_signal_t sig) {
  _lock.lock();
  if (_value > 0) {
    if (internals::send_signal(internals::vmem.current_process, sig))
      _value--;
    _lock.unlock();
    return false;
  }
  _waiting[_tail] = internals::vmem.current_process;
  _signals[_tail] = sig;
  next(_tail);
  _lock.unlock();
  return true;
}

bool Semaphore::stop_wait() {
  bool result = false;
  _lock.lock();
  for (int i = _head; i != _tail; next(i)) {
    if (_waiting[i] == internals::vmem.current_process) {
      int last = i;
      next(i);
      while (i != _tail) {
        _waiting[last] = _waiting[i];
        _signals[last] = _signals[i];
        last = i;
        next(i);
      }
      _tail = last;
      result = true;
      break;
    }
  }
  _lock.unlock();
  return result;
}

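// EventSet::wait() registers the calling process with every event, passing
// each event its index in the set, then blocks until a signal arrives and
// returns it as an int (the index that was handed to start_listen()).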
void EventSet::add(Event *event) {
  event->_next = NULL;
  if (_head == NULL) {
    _head = _tail = event;
  } else {
    _tail->_next = event;
    _tail = event;
  }
}

int EventSet::wait() {
  size_t n = 0;
  for (Event *event = _head; event; event = event->_next) {
    if (!event->start_listen((int) (n++))) {
      break;
    }
  }
  internals::ipc_signal_t result = internals::wait_signal();
  for (Event *event = _head; event; event = event->_next) {
    event->stop_listen();
  }
  return (int) result;
}

} // namespace vspace
#else
#include <cstdlib>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>


namespace vspace {
namespace internals {

size_t config[4]
    = {METABLOCK_SIZE, MAX_PROCESS, SEGMENT_SIZE, MAX_SEGMENTS};

VMem VMem::vmem_global;

// offsetof() only works for POD types, so we need to construct
// a portable version of it for metapage fields.

#define metapageaddr(field) \
  ((char *) &vmem.metapage->field - (char *) vmem.metapage)

size_t VMem::filesize() {
  struct stat stat;
  fstat(fd, &stat);
  return stat.st_size;
}

Status VMem::init(int fd) {
  this->fd = fd;
  for (int i = 0; i < MAX_SEGMENTS; i++)
    segments[i] = VSeg(NULL);
  for (int i = 0; i < MAX_PROCESS; i++) {
    int channel[2];
    if (pipe(channel) < 0) {
      for (int j = 0; j < i; j++) {
        close(channels[j].fd_read);
        close(channels[j].fd_write);
      }
      return Status(ErrOS);
    }
    channels[i].fd_read = channel[0];
    channels[i].fd_write = channel[1];
  }
  lock_metapage();
  init_metapage(filesize() == 0);
  unlock_metapage();
  freelist = metapage->freelist;
  return Status(ErrNone);
}

Status VMem::init() {
  FILE *fp = tmpfile();
  Status result = init(fileno(fp));
  if (!result.ok())
    return result;
  current_process = 0;
  file_handle = fp;
  metapage->process_info[0].pid = getpid();
  return Status(ErrNone);
}

Status VMem::init(const char *path) {
  int fd = open(path, O_RDWR | O_CREAT, 0600);
  if (fd < 0)
    return Status(ErrFile);
  init(fd);
  lock_metapage();
  // TODO: enter process in meta table
  unlock_metapage();
  return Status(ErrNone);
}

void VMem::deinit() {
  if (file_handle) {
    fclose(file_handle);
    file_handle = NULL;
  } else {
    close(fd);
  }
  munmap(metapage, METABLOCK_SIZE);
  metapage = NULL;
  current_process = -1;
  freelist = NULL;
  for (int i = 0; i < MAX_SEGMENTS; i++) {
    if (!segments[i].is_free())
      munmap(segments[i].base, SEGMENT_SIZE);
    segments[i] = VSeg(NULL);
  }
  for (int i = 0; i < MAX_PROCESS; i++) {
    close(channels[i].fd_read);
    close(channels[i].fd_write);
  }
}

void *VMem::mmap_segment(int seg) {
  lock_metapage();
  void *map = mmap(NULL, SEGMENT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
      METABLOCK_SIZE + seg * SEGMENT_SIZE);
  if (map == MAP_FAILED) {
    // This is an "impossible to proceed from here, because system state
    // is impossible to proceed from" situation, so we abort the program.
    perror("mmap");
    abort();
  }
  unlock_metapage();
  return map;
}

void VMem::add_segment() {
  int seg = metapage->segment_count++;
  ftruncate(fd, METABLOCK_SIZE + metapage->segment_count * SEGMENT_SIZE);
  void *map_addr = mmap_segment(seg);
  segments[seg] = VSeg(map_addr);
  Block *top = block_ptr(seg * SEGMENT_SIZE);
  top->next = freelist[LOG2_SEGMENT_SIZE];
  top->prev = VADDR_NULL;
  freelist[LOG2_SEGMENT_SIZE] = seg * SEGMENT_SIZE;
}

void FastLock::lock() {
#ifdef HAVE_CPP_THREADS
  while (_lock.test_and_set()) {
  }
  bool empty = _owner < 0;
  if (empty) {
    _owner = vmem.current_process;
  } else {
    int p = vmem.current_process;
    vmem.metapage->process_info[p].next = -1;
    if (_head < 0)
      _head = p;
    else
      vmem.metapage->process_info[_tail].next = p;
    _tail = p;
  }
  _lock.clear();
  if (!empty)
    wait_signal(false);
#else
  lock_file(vmem.fd, _offset);
#endif
}

void FastLock::unlock() {
#ifdef HAVE_CPP_THREADS
  while (_lock.test_and_set()) {
  }
  _owner = _head;
  if (_owner >= 0)
    _head = vmem.metapage->process_info[_head].next;
  _lock.clear();
  if (_owner >= 0)
    send_signal(_owner, 0, false);
#else
  unlock_file(vmem.fd, _offset);
#endif
}

static void lock_allocator() {
  lock_file(vmem.fd, metapageaddr(freelist));
}

static void unlock_allocator() {
  unlock_file(vmem.fd, metapageaddr(freelist));
}

static void print_freelists() {
  for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
    vaddr_t vaddr = vmem.freelist[i];
    if (vaddr != VADDR_NULL) {
      std::printf("%2d: %ld", i, (long)vaddr);
      vaddr_t prev = block_ptr(vaddr)->prev;
      if (prev != VADDR_NULL) {
        std::printf("(%ld)", (long)prev);
      }
      assert(block_ptr(vaddr)->prev == VADDR_NULL);
      for (;;) {
        vaddr_t last_vaddr = vaddr;
        Block *block = block_ptr(vaddr);
        vaddr = block->next;
        if (vaddr == VADDR_NULL)
          break;
        std::printf(" -> %ld", (long)vaddr);
        vaddr_t prev = block_ptr(vaddr)->prev;
        if (prev != last_vaddr) {
          std::printf("(%ld)", (long)prev);
        }
      }
      std::printf("\n");
    }
  }
  std::fflush(stdout);
}

void vmem_free(vaddr_t vaddr) {
  lock_allocator();
  vaddr -= offsetof(Block, data);
  vmem.ensure_is_mapped(vaddr);
  size_t segno = vmem.segment_no(vaddr);
  VSeg seg = vmem.segment(vaddr);
  segaddr_t addr = vmem.segaddr(vaddr);
  int level = seg.block_ptr(addr)->level();
  assert(!seg.is_free(addr));
  while (level < LOG2_SEGMENT_SIZE) {
    segaddr_t buddy = find_buddy(addr, level);
    Block *block = seg.block_ptr(buddy);
    // is buddy free and at the same level?
    if (!block->is_free() || block->level() != level)
      break;
    // remove buddy from freelist.
    Block *prev = vmem.block_ptr(block->prev);
    Block *next = vmem.block_ptr(block->next);
    block->data[0] = level;
    if (prev) {
      assert(prev->next == vmem.vaddr(segno, buddy));
      prev->next = block->next;
    } else {
      // head of freelist.
      assert(vmem.freelist[level] == vmem.vaddr(segno, buddy));
      vmem.freelist[level] = block->next;
    }
    if (next) {
      assert(next->prev == vmem.vaddr(segno, buddy));
      next->prev = block->prev;
    }
    // coalesce block with buddy
    level++;
    if (buddy < addr)
      addr = buddy;
  }
  // Add coalesced block to free list
  Block *block = seg.block_ptr(addr);
  block->prev = VADDR_NULL;
  block->next = vmem.freelist[level];
  block->mark_as_free(level);
  vaddr_t blockaddr = vmem.vaddr(segno, addr);
  if (block->next != VADDR_NULL)
    vmem.block_ptr(block->next)->prev = blockaddr;
  vmem.freelist[level] = blockaddr;
  unlock_allocator();
}

vaddr_t vmem_alloc(size_t size) {
  lock_allocator();
  size_t alloc_size = size + offsetof(Block, data);
  int level = find_level(alloc_size);
  int flevel = level;
  while (flevel < LOG2_SEGMENT_SIZE && vmem.freelist[flevel] == VADDR_NULL)
    flevel++;
  if (vmem.freelist[flevel] == VADDR_NULL) {
    vmem.add_segment();
  }
  while (flevel > level) {
    // get and split a block
    vaddr_t blockaddr = vmem.freelist[flevel];
    assert((blockaddr & ((1 << flevel) - 1)) == 0);
    Block *block = vmem.block_ptr(blockaddr);
    vmem.freelist[flevel] = block->next;
    if (vmem.freelist[flevel] != VADDR_NULL)
      vmem.block_ptr(vmem.freelist[flevel])->prev = VADDR_NULL;
    vaddr_t blockaddr2 = blockaddr + (1 << (flevel - 1));
    Block *block2 = vmem.block_ptr(blockaddr2);
    flevel--;
    block2->next = vmem.freelist[flevel];
    block2->prev = blockaddr;
    block->next = blockaddr2;
    block->prev = VADDR_NULL;
    // block->prev == VADDR_NULL already.
    vmem.freelist[flevel] = blockaddr;
  }
  Block *block = vmem.block_ptr(vmem.freelist[level]);
  vaddr_t vaddr = vmem.freelist[level];
  vaddr_t result = vaddr + offsetof(Block, data);
  vmem.freelist[level] = block->next;
  if (block->next != VADDR_NULL)
    vmem.block_ptr(block->next)->prev = VADDR_NULL;
  block->mark_as_allocated(vaddr, level);
  unlock_allocator();
  memset(block->data, 0, size);
  return result;
}

void init_flock_struct(
    struct flock &lock_info, size_t offset, size_t len, bool lock) {
  lock_info.l_start = offset;
  lock_info.l_len = len;
  lock_info.l_pid = 0;
  lock_info.l_type = lock ? F_WRLCK : F_UNLCK;
  lock_info.l_whence = SEEK_SET;
}

void lock_file(int fd, size_t offset, size_t len) {
  struct flock lock_info;
  init_flock_struct(lock_info, offset, len, true);
  fcntl(fd, F_SETLKW, &lock_info);
}

void unlock_file(int fd, size_t offset, size_t len) {
  struct flock lock_info;
  init_flock_struct(lock_info, offset, len, false);
  fcntl(fd, F_SETLKW, &lock_info);
}

void lock_metapage() {
  lock_file(vmem.fd, 0);
}

void unlock_metapage() {
  unlock_file(vmem.fd, 0);
}

void init_metapage(bool create) {
  if (create)
    ftruncate(vmem.fd, METABLOCK_SIZE);
  vmem.metapage = (MetaPage *) mmap(
      NULL, METABLOCK_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, vmem.fd, 0);
  if (create) {
    std::memcpy(vmem.metapage->config_header, config, sizeof(config));
    for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
      vmem.metapage->freelist[i] = VADDR_NULL;
    }
    vmem.metapage->segment_count = 0;
  } else {
    assert(std::memcmp(vmem.metapage->config_header, config,
        sizeof(config)) != 0);
  }
}

static void lock_process(int processno) {
  lock_file(vmem.fd,
      metapageaddr(process_info)
          + sizeof(ProcessInfo) * vmem.current_process);
}

static void unlock_process(int processno) {
  unlock_file(vmem.fd,
      metapageaddr(process_info)
          + sizeof(ProcessInfo) * vmem.current_process);
}

static ProcessInfo &process_info(int processno) {
  return vmem.metapage->process_info[processno];
}

bool send_signal(int processno, ipc_signal_t sig, bool lock) {
  if (lock)
    lock_process(processno);
  if (process_info(processno).sigstate != Waiting) {
    unlock_process(processno);
    return false;
  }
  if (processno == vmem.current_process) {
    process_info(processno).sigstate = Accepted;
    process_info(processno).signal = sig;
  } else {
    process_info(processno).sigstate = Pending;
    process_info(processno).signal = sig;
    int fd = vmem.channels[processno].fd_write;
    char buf[1] = { 0 };
    while (write(fd, buf, 1) != 1) {
    }
  }
  if (lock)
    unlock_process(processno);
  return true;
}

ipc_signal_t check_signal(bool resume, bool lock) {
  ipc_signal_t result = 0;
  if (lock)
    lock_process(vmem.current_process);
  int sigstate = process_info(vmem.current_process).sigstate;
  switch (sigstate) {
    case Waiting:
    case Pending: {
      int fd = vmem.channels[vmem.current_process].fd_read;
      char buf[1];
      if (lock && sigstate == Waiting) {
        unlock_process(vmem.current_process);
        while (read(fd, buf, 1) != 1) {
        }
        lock_process(vmem.current_process);
      } else {
        while (read(fd, buf, 1) != 1) {
        }
      }
      result = process_info(vmem.current_process).signal;
      process_info(vmem.current_process).sigstate
          = resume ? Waiting : Accepted;
      if (lock)
        unlock_process(vmem.current_process);
      break;
    }
    case Accepted:
      result = process_info(vmem.current_process).signal;
      if (resume)
        process_info(vmem.current_process).sigstate = Waiting;
      if (lock)
        unlock_process(vmem.current_process);
      break;
  }
  return result;
}

void accept_signals() {
  lock_process(vmem.current_process);
  process_info(vmem.current_process).sigstate = Waiting;
  unlock_process(vmem.current_process);
}

ipc_signal_t wait_signal(bool lock) {
  return check_signal(true, lock);
}

} // namespace internals

pid_t fork_process() {
  using namespace internals;
  lock_metapage();
  for (int p = 0; p < MAX_PROCESS; p++) {
    if (vmem.metapage->process_info[p].pid == 0) {
      pid_t pid = fork();
      if (pid < 0) {
        // error
        return -1;
      } else if (pid == 0) {
        // child process
        int parent = vmem.current_process;
        vmem.current_process = p;
        vmem.metapage->process_info[p].pid = getpid();
        unlock_metapage();
        send_signal(parent);
      } else {
        // parent process
        wait_signal();
        // child has unlocked metapage, so we don't need to.
      }
      return pid;
    }
  }
  unlock_metapage();
  return -1;
}

void Semaphore::post() {
  int wakeup = -1;
  internals::ipc_signal_t sig = 0;
  _lock.lock();
  if (_head == _tail) {
    _value++;
  } else {
    // don't increment value, as we'll pass that on to the next process.
    wakeup = _waiting[_head];
    sig = _signals[_head];
    next(_head);
  }
  _lock.unlock();
  if (wakeup >= 0) {
    internals::send_signal(wakeup, sig);
  }
}

bool Semaphore::try_wait() {
  bool result = false;
  _lock.lock();
  if (_value > 0) {
    _value--;
    result = true;
  }
  _lock.unlock();
  return result;
}

void Semaphore::wait() {
  _lock.lock();
  if (_value > 0) {
    _value--;
    _lock.unlock();
    return;
  }
  _waiting[_tail] = internals::vmem.current_process;
  _signals[_tail] = 0;
  next(_tail);
  _lock.unlock();
  internals::wait_signal();
}

bool Semaphore::start_wait(internals::ipc_signal_t sig) {
  _lock.lock();
  if (_value > 0) {
    if (internals::send_signal(internals::vmem.current_process, sig))
      _value--;
    _lock.unlock();
    return false;
  }
  _waiting[_tail] = internals::vmem.current_process;
  _signals[_tail] = sig;
  next(_tail);
  _lock.unlock();
  return true;
}

bool Semaphore::stop_wait() {
  bool result = false;
  _lock.lock();
  for (int i = _head; i != _tail; next(i)) {
    if (_waiting[i] == internals::vmem.current_process) {
      int last = i;
      next(i);
      while (i != _tail) {
        _waiting[last] = _waiting[i];
        _signals[last] = _signals[i];
        last = i;
        next(i);
      }
      _tail = last;
      result = true;
      break;
    }
  }
  _lock.unlock();
  return result;
}

void EventSet::add(Event *event) {
  event->_next = NULL;
  if (_head == NULL) {
    _head = _tail = event;
  } else {
    _tail->_next = event;
    _tail = event;
  }
}

int EventSet::wait() {
  size_t n = 0;
  for (Event *event = _head; event; event = event->_next) {
    if (!event->start_listen((int) (n++))) {
      break;
    }
  }
  internals::ipc_signal_t result = internals::wait_signal();
  for (Event *event = _head; event; event = event->_next) {
    event->stop_listen();
  }
  return (int) result;
}

} // namespace vspace
#endif
#endif