/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach_assert.h>
#include <mach/vm_types.h>
#include <mach/mach_time.h>
#include <kern/timer.h>
#include <kern/clock.h>
#include <kern/machine.h>
#include <mach/machine.h>
#include <mach/machine/vm_param.h>
#include <mach_kdp.h>
#include <kdp/kdp_udp.h>
#if !MACH_KDP
#include <kdp/kdp_callout.h>
#endif /* !MACH_KDP */
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/caches_internal.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <arm/misc_protos.h>

#include <sys/errno.h>

#define INT_SIZE	(BYTE_SIZE * sizeof (int))

#define BCOPY_PHYS_SRC_IS_PHYS(flags)	(((flags) & cppvPsrc) != 0)
#define BCOPY_PHYS_DST_IS_PHYS(flags)	(((flags) & cppvPsnk) != 0)
#define BCOPY_PHYS_SRC_IS_USER(flags)	(((flags) & (cppvPsrc | cppvKmap)) == 0)
#define BCOPY_PHYS_DST_IS_USER(flags)	(((flags) & (cppvPsnk | cppvKmap)) == 0)

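/*
 * Copy between physical and/or virtual addresses, as selected by 'flags':
 * cppvPsrc/cppvPsnk mark the source/sink as physical, and an address with
 * neither its physical bit nor cppvKmap set is treated as a user virtual
 * address (see the BCOPY_PHYS_* macros above). Pages that cannot be
 * reached through the kernel's physical aperture, or that have
 * non-default cache attributes, are accessed through a temporary per-CPU
 * copy window with preemption disabled.
 */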
static kern_return_t
bcopy_phys_internal(addr64_t src, addr64_t dst, vm_size_t bytes, int flags)
{
	unsigned int src_index;
	unsigned int dst_index;
	vm_offset_t src_offset;
	vm_offset_t dst_offset;
	unsigned int wimg_bits_src, wimg_bits_dst;
	unsigned int cpu_num = 0;
	ppnum_t pn_src;
	ppnum_t pn_dst;
	addr64_t end __assert_only;
	kern_return_t res = KERN_SUCCESS;

	assert(!__improbable(os_add_overflow(src, bytes, &end)));
	assert(!__improbable(os_add_overflow(dst, bytes, &end)));

	while ((bytes > 0) && (res == KERN_SUCCESS)) {
		src_offset = src & PAGE_MASK;
		dst_offset = dst & PAGE_MASK;
		boolean_t use_copy_window_src = FALSE;
		boolean_t use_copy_window_dst = FALSE;
		vm_size_t count = bytes;
		vm_size_t count2 = bytes;
		if (BCOPY_PHYS_SRC_IS_PHYS(flags)) {
			use_copy_window_src = !pmap_valid_address(src);
			pn_src = (ppnum_t)(src >> PAGE_SHIFT);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
			count = PAGE_SIZE - src_offset;
			wimg_bits_src = pmap_cache_attributes(pn_src);
			if ((wimg_bits_src & VM_WIMG_MASK) != VM_WIMG_DEFAULT)
				use_copy_window_src = TRUE;
#else
			if (use_copy_window_src) {
				wimg_bits_src = pmap_cache_attributes(pn_src);
				count = PAGE_SIZE - src_offset;
			}
#endif
		}
		if (BCOPY_PHYS_DST_IS_PHYS(flags)) {
			// write preflighting needed for things like dtrace which may write static read-only mappings
			use_copy_window_dst = (!pmap_valid_address(dst) || !mmu_kvtop_wpreflight(phystokv((pmap_paddr_t)dst)));
			pn_dst = (ppnum_t)(dst >> PAGE_SHIFT);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
			count2 = PAGE_SIZE - dst_offset;
			wimg_bits_dst = pmap_cache_attributes(pn_dst);
			if ((wimg_bits_dst & VM_WIMG_MASK) != VM_WIMG_DEFAULT)
				use_copy_window_dst = TRUE;
#else
			if (use_copy_window_dst) {
				wimg_bits_dst = pmap_cache_attributes(pn_dst);
				count2 = PAGE_SIZE - dst_offset;
			}
#endif
		}

		char *tmp_src;
		char *tmp_dst;

		if (use_copy_window_src || use_copy_window_dst) {
			mp_disable_preemption();
			cpu_num = cpu_number();
		}

		if (use_copy_window_src) {
			src_index = pmap_map_cpu_windows_copy(pn_src, VM_PROT_READ, wimg_bits_src);
			tmp_src = (char*)(pmap_cpu_windows_copy_addr(cpu_num, src_index) + src_offset);
		} else if (BCOPY_PHYS_SRC_IS_PHYS(flags)) {
			tmp_src = (char*)phystokv_range((pmap_paddr_t)src, &count);
		} else {
			tmp_src = (char*)src;
		}
		if (use_copy_window_dst) {
			dst_index = pmap_map_cpu_windows_copy(pn_dst, VM_PROT_READ | VM_PROT_WRITE, wimg_bits_dst);
			tmp_dst = (char*)(pmap_cpu_windows_copy_addr(cpu_num, dst_index) + dst_offset);
		} else if (BCOPY_PHYS_DST_IS_PHYS(flags)) {
			tmp_dst = (char*)phystokv_range((pmap_paddr_t)dst, &count2);
		} else {
			tmp_dst = (char*)dst;
		}

		if (count > count2)
			count = count2;
		if (count > bytes)
			count = bytes;

		if (BCOPY_PHYS_SRC_IS_USER(flags))
			res = copyin((user_addr_t)src, tmp_dst, count);
		else if (BCOPY_PHYS_DST_IS_USER(flags))
			res = copyout(tmp_src, (user_addr_t)dst, count);
		else
			bcopy(tmp_src, tmp_dst, count);

		if (use_copy_window_src)
			pmap_unmap_cpu_windows_copy(src_index);
		if (use_copy_window_dst)
			pmap_unmap_cpu_windows_copy(dst_index);
		if (use_copy_window_src || use_copy_window_dst)
			mp_enable_preemption();

		src += count;
		dst += count;
		bytes -= count;
	}
	return res;
}

void
bcopy_phys(addr64_t src, addr64_t dst, vm_size_t bytes)
{
	bcopy_phys_internal(src, dst, bytes, cppvPsrc | cppvPsnk);
}

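/*
 * Zero bytes at a physical address, without regard to cacheability.
 * bzero_phys() already routes pages with non-default cache attributes
 * through an appropriately mapped copy window, so this variant simply
 * forwards to it.
 */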
void
bzero_phys_nc(addr64_t src64, vm_size_t bytes)
{
	bzero_phys(src64, bytes);
}

/* Zero bytes starting at a physical address */
void
bzero_phys(addr64_t src, vm_size_t bytes)
{
	unsigned int wimg_bits;
	unsigned int cpu_num = cpu_number();
	ppnum_t pn;
	addr64_t end __assert_only;

	assert(!__improbable(os_add_overflow(src, bytes, &end)));

	vm_offset_t offset = src & PAGE_MASK;
	while (bytes > 0) {
		vm_size_t count = bytes;

		boolean_t use_copy_window = !pmap_valid_address(src);
		pn = (ppnum_t)(src >> PAGE_SHIFT);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
		count = PAGE_SIZE - offset;
		wimg_bits = pmap_cache_attributes(pn);
		if ((wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT)
			use_copy_window = TRUE;
#else
		if (use_copy_window) {
			wimg_bits = pmap_cache_attributes(pn);
			count = PAGE_SIZE - offset;
		}
#endif
		char *buf;
		unsigned int index;
		if (use_copy_window) {
			mp_disable_preemption();
			cpu_num = cpu_number();
			index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
			buf = (char *)(pmap_cpu_windows_copy_addr(cpu_num, index) + offset);
		} else {
			buf = (char *)phystokv_range((pmap_paddr_t)src, &count);
		}

		if (count > bytes)
			count = bytes;

		bzero(buf, count);

		if (use_copy_window) {
			pmap_unmap_cpu_windows_copy(index);
			mp_enable_preemption();
		}

		src += count;
		bytes -= count;
		offset = 0;
	}
}

/*
 * Read data from a physical address.
 */

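/*
 * The access must not cross a page boundary (enforced by a panic below).
 * When the kernel's physical aperture covers the address, the read goes
 * through phystokv() directly; otherwise the page is mapped through a
 * per-CPU copy window using its existing cache attributes.
 */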
static unsigned long long
ml_phys_read_data(pmap_paddr_t paddr, int size)
{
	unsigned int index;
	unsigned int wimg_bits;
	ppnum_t pn = (ppnum_t)(paddr >> PAGE_SHIFT);
	ppnum_t pn_end = (ppnum_t)((paddr + size - 1) >> PAGE_SHIFT);
	unsigned long long result = 0;
	vm_offset_t copywindow_vaddr = 0;
	unsigned char s1;
	unsigned short s2;
	unsigned int s4;

	if (__improbable(pn_end != pn))
		panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr);

#if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
	if (pmap_valid_address(paddr)) {
		switch (size) {
		case 1:
			s1 = *(volatile unsigned char *)phystokv(paddr);
			result = s1;
			break;
		case 2:
			s2 = *(volatile unsigned short *)phystokv(paddr);
			result = s2;
			break;
		case 4:
			s4 = *(volatile unsigned int *)phystokv(paddr);
			result = s4;
			break;
		case 8:
			result = *(volatile unsigned long long *)phystokv(paddr);
			break;
		default:
			panic("Invalid size %d for ml_phys_read_data\n", size);
			break;
		}
		return result;
	}
#endif

	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
	copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);

	switch (size) {
	case 1:
		s1 = *(volatile unsigned char *)copywindow_vaddr;
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)copywindow_vaddr;
		result = s2;
		break;
	case 4:
		s4 = *(volatile unsigned int *)copywindow_vaddr;
		result = s4;
		break;
	case 8:
		result = *(volatile unsigned long long *)copywindow_vaddr;
		break;
	default:
		panic("Invalid size %d for ml_phys_read_data\n", size);
		break;
	}

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();

	return result;
}

unsigned int
ml_phys_read(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int
ml_phys_read_word(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int
ml_phys_read_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int
ml_phys_read_word_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int
ml_phys_read_half(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 2);
}

unsigned int
ml_phys_read_half_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 2);
}

unsigned int
ml_phys_read_byte(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 1);
}

unsigned int
ml_phys_read_byte_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 1);
}

unsigned long long
ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 8);
}

unsigned long long
ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 8);
}

/*
 * Write data to a physical address.
 */

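/*
 * As with the read path, the access must stay within a single page;
 * writes go through the physical aperture when the address is covered
 * by it, and otherwise through a temporary per-CPU copy window mapped
 * VM_PROT_READ | VM_PROT_WRITE with the page's cache attributes.
 */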
static void
ml_phys_write_data(pmap_paddr_t paddr, unsigned long long data, int size)
{
	unsigned int index;
	unsigned int wimg_bits;
	ppnum_t pn = (ppnum_t)(paddr >> PAGE_SHIFT);
	ppnum_t pn_end = (ppnum_t)((paddr + size - 1) >> PAGE_SHIFT);
	vm_offset_t copywindow_vaddr = 0;

	if (__improbable(pn_end != pn))
		panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr);

#if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
	if (pmap_valid_address(paddr)) {
		switch (size) {
		case 1:
			*(volatile unsigned char *)phystokv(paddr) = (unsigned char)data;
			return;
		case 2:
			*(volatile unsigned short *)phystokv(paddr) = (unsigned short)data;
			return;
		case 4:
			*(volatile unsigned int *)phystokv(paddr) = (unsigned int)data;
			return;
		case 8:
			*(volatile unsigned long long *)phystokv(paddr) = data;
			return;
		default:
			panic("Invalid size %d for ml_phys_write_data\n", size);
		}
	}
#endif

	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
	copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);

	switch (size) {
	case 1:
		*(volatile unsigned char *)(copywindow_vaddr) = (unsigned char)data;
		break;
	case 2:
		*(volatile unsigned short *)(copywindow_vaddr) = (unsigned short)data;
		break;
	case 4:
		*(volatile unsigned int *)(copywindow_vaddr) = (uint32_t)data;
		break;
	case 8:
		*(volatile unsigned long long *)(copywindow_vaddr) = (unsigned long long)data;
		break;
	default:
		panic("Invalid size %d for ml_phys_write_data\n", size);
		break;
	}

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();
}

void
ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
}

void
ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
}

void
ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
}

void
ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
}

void
ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void
ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void
ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void
ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void
ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 8);
}

void
ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 8);
}

/*
 * Set indicated bit in bit string.
 */
void
setbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] |= 1 << (bitno % INT_SIZE);
}

/*
 * Clear indicated bit in bit string.
 */
void
clrbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] &= ~(1 << (bitno % INT_SIZE));
}

/*
 * Test if indicated bit is set in bit string.
 */
int
testbit(int bitno, int *s)
{
	return s[bitno / INT_SIZE] & (1 << (bitno % INT_SIZE));
}
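
/*
 * Example: with 32-bit ints (INT_SIZE == 32), bit 37 of a bit string
 * lives in s[1] (37 / 32) at position 5 (37 % 32), so setbit(37, s)
 * is equivalent to s[1] |= 1 << 5.
 */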

/*
 * Find first bit set in bit string.
 */
int
ffsbit(int *s)
{
	int offset;

	for (offset = 0; !*s; offset += INT_SIZE, ++s)
		;
	return offset + __builtin_ctz(*s);
}

int
ffs(unsigned int mask)
{
	if (mask == 0)
		return 0;

	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * 'ffs'
	 */
	return 1 + __builtin_ctz(mask);
}

int
ffsll(unsigned long long mask)
{
	if (mask == 0)
		return 0;

	/*
	 * NOTE: cannot use __builtin_ffsll because it generates a call to
	 * 'ffsll'
	 */
	return 1 + __builtin_ctzll(mask);
}

/*
 * Find last bit set in bit string.
 */
int
fls(unsigned int mask)
{
	if (mask == 0)
		return 0;

	return (sizeof (mask) << 3) - __builtin_clz(mask);
}

int
flsll(unsigned long long mask)
{
	if (mask == 0)
		return 0;

	return (sizeof (mask) << 3) - __builtin_clzll(mask);
}

#undef bcmp
int
bcmp(
	const void *pa,
	const void *pb,
	size_t len)
{
	const char *a = (const char *) pa;
	const char *b = (const char *) pb;

	if (len == 0)
		return 0;

	do {
		if (*a++ != *b++)
			break;
	} while (--len);

	/*
	 * Check for the overflow case: truncating a large 64-bit 'len'
	 * to int could spuriously yield 0. Otherwise return 'len' itself,
	 * in case a caller uses the return value as more than zero/non-zero.
	 */
	if ((len & 0xFFFFFFFF00000000ULL) && !(len & 0x00000000FFFFFFFFULL))
		return 0xFFFFFFFFL;
	else
		return (int)len;
}

#undef memcmp
int
memcmp(const void *s1, const void *s2, size_t n)
{
	if (n != 0) {
		const unsigned char *p1 = s1, *p2 = s2;

		do {
			if (*p1++ != *p2++)
				return (*--p1 - *--p2);
		} while (--n != 0);
	}
	return (0);
}

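/*
 * Copy between a physical and a virtual address (at most one side may be
 * virtual). cppvPsrc/cppvPsnk mark the physical side(s); on non-coherent
 * platforms, cppvFsrc/cppvFsnk additionally request a data-cache flush of
 * the source/sink range after the copy.
 */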
kern_return_t
copypv(addr64_t source, addr64_t sink, unsigned int size, int which)
{
	if ((which & (cppvPsrc | cppvPsnk)) == 0)	/* at least one side must be physical */
		panic("%s: no more than 1 parameter may be virtual", __func__);

	kern_return_t res = bcopy_phys_internal(source, sink, size, which);

#ifndef __ARM_COHERENT_IO__
	if (which & cppvFsrc)
		flush_dcache64(source, size, ((which & cppvPsrc) == cppvPsrc));

	if (which & cppvFsnk)
		flush_dcache64(sink, size, ((which & cppvPsnk) == cppvPsnk));
#endif

	return res;
}

#if MACH_ASSERT

extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);

/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void
machine_callstack(
	uintptr_t *buf,
	vm_size_t callstack_max)
{
	/* Captures the USER call stack */
	uint32_t i = 0;

	struct arm_saved_state *state = find_user_regs(current_thread());

	if (!state) {
		while (i < callstack_max)
			buf[i++] = 0;
	} else {
		if (is_saved_state64(state)) {
			uint64_t frame[2];
			buf[i++] = (uintptr_t)get_saved_state_pc(state);
			frame[0] = get_saved_state_fp(state);
			while (i < callstack_max && frame[0] != 0) {
				if (copyinframe(frame[0], (void*)frame, TRUE))
					break;
				buf[i++] = (uintptr_t)frame[1];
			}
		} else {
			uint32_t frame[2];
			buf[i++] = (uintptr_t)get_saved_state_pc(state);
			frame[0] = (uint32_t)get_saved_state_fp(state);
			while (i < callstack_max && frame[0] != 0) {
				if (copyinframe(frame[0], (void*)frame, FALSE))
					break;
				buf[i++] = (uintptr_t)frame[1];
			}
		}

		while (i < callstack_max)
			buf[i++] = 0;
	}
}

#endif /* MACH_ASSERT */

int
clr_be_bit(void)
{
	panic("clr_be_bit");
	return 0;
}

boolean_t
ml_probe_read(
	__unused vm_offset_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read() unimplemented");
	return 1;
}

boolean_t
ml_probe_read_64(
	__unused addr64_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read_64() unimplemented");
	return 1;
}

void
ml_thread_policy(
	__unused thread_t thread,
	__unused unsigned policy_id,
	__unused unsigned policy_info)
{
	// <rdar://problem/7141284>: Reduce print noise
	// kprintf("ml_thread_policy() unimplemented\n");
}

void
panic_unimplemented(void)
{
	panic("Not yet implemented.");
}

/* ARM64_TODO <rdar://problem/9198953> */
void abort(void);

void
abort(void)
{
	panic("Abort.");
}

#if !MACH_KDP
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn, arg)
}
#endif