Andriy Gapon
2014-Feb-12 17:06 UTC
panic: vm_page_unwire: page 0xfffffe104d4c9cd8's wire count is zero
I've got an odd panic on a stable/9 system: (kgdb) bt #0 doadump (textdump=1) at /usr/src/sys/kern/kern_shutdown.c:266 #1 0xffffffff808f9814 in kern_reboot (howto=260) at /usr/src/sys/kern/kern_shutdown.c:449 #2 0xffffffff808f9d07 in panic (fmt=0x1 <Address 0x1 out of bounds>) at /usr/src/sys/kern/kern_shutdown.c:637 #3 0xffffffff80b81b45 in vm_page_unwire (m=<value optimized out>, activate=<value optimized out>) at /usr/src/sys/vm/vm_page.c:2018 #4 0xffffffff80b6e062 in vm_fault_unwire (map=<value optimized out>, start=<value optimized out>, end=4202496, fictitious=0) at /usr/src/sys/vm/vm_fault.c:1238 #5 0xffffffff80b7617f in vm_map_delete (map=0xfffffe01a8eeaaf0, start=4096, end=140737488355328) at /usr/src/sys/vm/vm_map.c:2713 #6 0xffffffff80b764e1 in vm_map_remove (map=0xfffffe01a8eeaaf0, start=4096, end=140737488355328) at /usr/src/sys/vm/vm_map.c:2903 #7 0xffffffff80b79837 in vmspace_exit (td=0xfffffe01b8056490) at /usr/src/sys/vm/vm_map.c:350 #8 0xffffffff808c2f00 in exit1 (td=0xfffffe01b8056490, rv=0) at /usr/src/sys/kern/kern_exit.c:322 #9 0xffffffff808c424e in sys_sys_exit (td=<value optimized out>, uap=<value optimized out>) at /usr/src/sys/kern/kern_exit.c:121 #10 0xffffffff80cec26a in amd64_syscall (td=0xfffffe01b8056490, traced=0) at subr_syscall.c:135 #11 0xffffffff80cd64c7 in Xfast_syscall () at /usr/src/sys/amd64/amd64/exception.S:391 (kgdb) p *entry $2 = { prev = 0xfffffe01a8eeaaf0, next = 0xfffffe01a8f02500, left = 0x0, right = 0xfffffe01b805f800, start = 4194304, end = 4202496, avail_ssize = 0, adj_free = 2093056, max_free = 140703107510272, object = { vm_object = 0xfffffe025d23b0e8, sub_map = 0xfffffe025d23b0e8 }, offset = 0, eflags = 1068, // MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY | MAP_ENTRY_USER_WIRED | MAP_ENTRY_NOCOREDUMP protection = 5 '\005', max_protection = 7 '\a', inheritance = 1 '\001', read_ahead = 0 '\0', wired_count = 1, next_read = 2, cred = 0x0, wiring_thread = 0x0 } (kgdb) p *entry->object.vm_object $4 = { mtx = { lock_object = 
{ lo_name = 0xffffffff80fc2940 "vm object", lo_flags = 21168128, lo_data = 0, lo_witness = 0x0 }, mtx_lock = 4 }, object_list = { tqe_next = 0xfffffe027019c828, tqe_prev = 0xfffffe0263ca0a18 }, shadow_head = { lh_first = 0x0 }, shadow_list = { le_next = 0xfffffe0263d24828, le_prev = 0xfffffe0029189a28 }, memq = { tqh_first = 0xfffffe104d4c9cd8, tqh_last = 0xfffffe10520616b0 }, root = 0xfffffe104d4c9cd8, size = 3, generation = 1, ref_count = 1, shadow_count = 0, memattr = 6 '\006', type = 2 '\002', // OBJT_VNODE flags = 4100, // OBJ_ACTIVE | OBJ_COLORED pg_color = 0, pad1 = 0, resident_page_count = 2, backing_object = 0x0, backing_object_offset = 0, pager_object_list = { tqe_next = 0x0, tqe_prev = 0x0 }, rvq = { lh_first = 0x0 }, cache = 0x0, handle = 0xfffffe0263c4b000, un_pager = { vnp = { vnp_size = 9872, writemappings = 0 }, devp = { devp_pglist = { tqh_first = 0x2690, tqh_last = 0x0 }, ops = 0x0 }, sgp = { sgp_pglist = { tqh_first = 0x2690, tqh_last = 0x0 } }, swp = { swp_bcount = 9872 } }, cred = 0x0, charge = 0, paging_in_progress = 0 } (kgdb) p *(vnode_t)entry->object.vm_object->handle $6 = { v_type = VREG, v_tag = 0xffffffff8185f1ac "zfs", v_op = 0xffffffff8186b280, v_data = 0xfffffe0263c62000, v_mount = 0xfffffe001cec09a8, v_nmntvnodes = { tqe_next = 0xfffffe025d8035f8, tqe_prev = 0xfffffe027038a620 }, v_un = { vu_mount = 0x0, vu_socket = 0x0, vu_cdev = 0x0, vu_fifoinfo = 0x0 }, v_hashlist = { le_next = 0x0, le_prev = 0x0 }, v_hash = 2157506860, v_cache_src = { lh_first = 0x0 }, v_cache_dst = { tqh_first = 0x0, tqh_last = 0xfffffe0263c4b060 }, v_cache_dd = 0x0, v_cstart = 0, v_lasta = 0, v_lastw = 0, v_clen = 0, v_lock = { lock_object = { lo_name = 0xffffffff8185f1ac "zfs", lo_flags = 108724224, lo_data = 0, lo_witness = 0x0 }, lk_lock = 1, lk_exslpfail = 0, lk_timo = 51, lk_pri = 96 }, v_interlock = { lock_object = { lo_name = 0xffffffff80f9ae39 "vnode interlock", lo_flags = 16973824, lo_data = 0, lo_witness = 0x0 }, mtx_lock = 4 }, v_vnlock = 
0xfffffe0263c4b098, v_holdcnt = 3, v_usecount = 2, v_iflag = 512, // VI_ACTIVE v_vflag = 32, // VV_TEXT v_writecount = 0, v_actfreelist = { tqe_next = 0xfffffe027038a5f8, tqe_prev = 0xfffffe0263c7d708 }, v_bufobj = { bo_mtx = { lock_object = { lo_name = 0xffffffff80f9ae49 "bufobj interlock", lo_flags = 16973824, lo_data = 0, lo_witness = 0x0 }, mtx_lock = 4 }, bo_clean = { bv_hd = { tqh_first = 0x0, tqh_last = 0xfffffe0263c4b140 }, bv_root = 0x0, bv_cnt = 0 }, bo_dirty = { bv_hd = { tqh_first = 0x0, tqh_last = 0xfffffe0263c4b160 }, bv_root = 0x0, bv_cnt = 0 }, bo_numoutput = 0, bo_flag = 0, bo_ops = 0xffffffff81316f20, bo_bsize = 131072, bo_object = 0xfffffe025d23b0e8, bo_synclist = { le_next = 0x0, le_prev = 0x0 }, bo_private = 0xfffffe0263c4b000, __bo_vnode = 0xfffffe0263c4b000 }, v_pollinfo = 0x0, v_label = 0x0, v_lockf = 0x0, v_rl = { rl_waiters = { tqh_first = 0x0, tqh_last = 0xfffffe0263c4b1e0 }, rl_currdep = 0x0 }, v_fullpath = '\0' <repeats 1023 times> } (kgdb) i loc pa = <value optimized out> va = 4194304 m = 0xfffffe104d4c9cd8 pmap = 0xfffffe01a8eeac28 (kgdb) p *m $7 = { pageq = { tqe_next = 0xfffffe106fb96e68, tqe_prev = 0xfffffe10520616a0 }, listq = { tqe_next = 0xfffffe10520616a0, tqe_prev = 0xfffffe025d23b130 }, left = 0x0, right = 0xfffffe10520616a0, object = 0xfffffe025d23b0e8, pindex = 0, phys_addr = 41917632512, md = { pv_list = { tqh_first = 0xfffffe02703de0b8, tqh_last = 0xfffffe02703de0c0 }, pat_mode = 6 }, queue = 1 '\001', segind = 3 '\003', hold_count = 0, order = 13 '\r', pool = 0 '\0', cow = 0, wire_count = 0, aflags = 0 '\0', flags = 0 '\0', oflags = 0, act_count = 64 '@', busy = 0 '\0', valid = 255 'ÿ', dirty = 0 '\0' } (kgdb) p *m->right $8 = { pageq = { tqe_next = 0xfffffe104d4c9cd8, tqe_prev = 0xfffffe105187aab8 }, listq = { tqe_next = 0x0, tqe_prev = 0xfffffe104d4c9ce8 }, left = 0x0, right = 0x0, object = 0xfffffe025d23b0e8, pindex = 1, phys_addr = 44623183872, md = { pv_list = { tqh_first = 0xfffffe02703de0a0, tqh_last = 
0xfffffe02703de0a8 }, pat_mode = 6 }, queue = 1 '\001', segind = 3 '\003', hold_count = 0, order = 13 '\r', pool = 0 '\0', cow = 0, wire_count = 0, aflags = 0 '\0', flags = 0 '\0', oflags = 0, act_count = 64 '@', busy = 0 '\0', valid = 255 'ÿ', dirty = 0 '\0' } (kgdb) p *m->md.pv_list.tqh_first $9 = { pv_va = 4194304, pv_list = { tqe_next = 0x0, tqe_prev = 0xfffffe104d4c9d20 } } Everything looks pretty consistent and sane to me. Except, of course, for the wire_count in both resident pages. The process in question is watchdogd, which seems to call mlockall(MCL_CURRENT | MCL_FUTURE), and that explains why the mapping is wired. -- Andriy Gapon