
flashrom Svn Source Tree

Root/trunk/physmap.c

/*
 * This file is part of the flashrom project.
 *
 * Copyright (C) 2009 Peter Stuge <peter@stuge.se>
 * Copyright (C) 2009 coresystems GmbH
 * Copyright (C) 2010 Carl-Daniel Hailfinger
 * Copyright (C) 2010 Rudolf Marek <r.marek@assembler.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <unistd.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "flash.h"
#include "programmer.h"
#include "hwaccess.h"

#if !defined(__DJGPP__) && !defined(__LIBPAYLOAD__)
/* No file access needed/possible to get mmap access permissions or access MSR. */
#include <sys/stat.h>
#include <fcntl.h>
#endif

#ifdef __DJGPP__
#include <dpmi.h>
#include <sys/nearptr.h>

#define MEM_DEV "dpmi"

static void *realmem_map;

static void *map_first_meg(uintptr_t phys_addr, size_t len)
{
	if (realmem_map)
		return realmem_map + phys_addr;

	realmem_map = valloc(1024 * 1024);

	if (!realmem_map)
		return ERROR_PTR;

	if (__djgpp_map_physical_memory(realmem_map, (1024 * 1024), 0)) {
		free(realmem_map);
		realmem_map = NULL;
		return ERROR_PTR;
	}

	return realmem_map + phys_addr;
}

static void *sys_physmap(uintptr_t phys_addr, size_t len)
{
	int ret;
	__dpmi_meminfo mi;

	/* Enable 4GB limit on DS descriptor. */
	if (!__djgpp_nearptr_enable())
		return ERROR_PTR;

	if ((phys_addr + len - 1) < (1024 * 1024)) {
		/* We need to use another method to map first 1MB. */
		return map_first_meg(phys_addr, len);
	}

	mi.address = phys_addr;
	mi.size = len;
	ret = __dpmi_physical_address_mapping(&mi);

	if (ret != 0)
		return ERROR_PTR;

	return (void *) mi.address + __djgpp_conventional_base;
}

#define sys_physmap_rw_uncached	sys_physmap
#define sys_physmap_ro_cached	sys_physmap

void sys_physunmap_unaligned(void *virt_addr, size_t len)
{
	__dpmi_meminfo mi;

	/* There is no known way to unmap the first 1 MB. The DPMI server will
	 * do this for us on exit.
	 */
	if ((virt_addr >= realmem_map) &&
	    ((virt_addr + len) <= (realmem_map + (1024 * 1024)))) {
		return;
	}

	mi.address = (unsigned long) virt_addr;
	__dpmi_free_physical_address_mapping(&mi);
}

#elif defined(__LIBPAYLOAD__)
#include <arch/virtual.h>

#define MEM_DEV ""

void *sys_physmap(uintptr_t phys_addr, size_t len)
{
	return (void *)phys_to_virt(phys_addr);
}

#define sys_physmap_rw_uncached	sys_physmap
#define sys_physmap_ro_cached	sys_physmap

void sys_physunmap_unaligned(void *virt_addr, size_t len)
{
}
#elif defined(__MACH__) && defined(__APPLE__)

#define MEM_DEV "DirectHW"

static void *sys_physmap(uintptr_t phys_addr, size_t len)
{
	/* The short form of ?: is a GNU extension.
	 * FIXME: map_physical returns NULL both for errors and for success
	 * if the region is mapped at virtual address zero. If in doubt, report
	 * an error until a better interface exists.
	 */
	return map_physical(phys_addr, len) ? : ERROR_PTR;
}

/* The OS X driver does not differentiate between mapping types. */
#define sys_physmap_rw_uncached	sys_physmap
#define sys_physmap_ro_cached	sys_physmap

void sys_physunmap_unaligned(void *virt_addr, size_t len)
{
	unmap_physical(virt_addr, len);
}

#else
#include <sys/mman.h>

#if defined (__sun) && (defined(__i386) || defined(__amd64))
# define MEM_DEV "/dev/xsvc"
#else
# define MEM_DEV "/dev/mem"
#endif

static int fd_mem = -1;
static int fd_mem_cached = -1;

/* For MMIO access. Must be uncached, doesn't make sense to restrict to ro. */
static void *sys_physmap_rw_uncached(uintptr_t phys_addr, size_t len)
{
	void *virt_addr;

	if (-1 == fd_mem) {
		/* Open the memory device UNCACHED. Important for MMIO. */
		if (-1 == (fd_mem = open(MEM_DEV, O_RDWR | O_SYNC))) {
			msg_perr("Critical error: open(" MEM_DEV "): %s\n", strerror(errno));
			return ERROR_PTR;
		}
	}

	virt_addr = mmap(NULL, len, PROT_WRITE | PROT_READ, MAP_SHARED, fd_mem, (off_t)phys_addr);
	return MAP_FAILED == virt_addr ? ERROR_PTR : virt_addr;
}

/* For reading DMI/coreboot/whatever tables. We should never write, and we
 * do not care about caching.
 */
static void *sys_physmap_ro_cached(uintptr_t phys_addr, size_t len)
{
	void *virt_addr;

	if (-1 == fd_mem_cached) {
		/* Open the memory device CACHED. */
		if (-1 == (fd_mem_cached = open(MEM_DEV, O_RDWR))) {
			msg_perr("Critical error: open(" MEM_DEV "): %s\n", strerror(errno));
			return ERROR_PTR;
		}
	}

	virt_addr = mmap(NULL, len, PROT_READ, MAP_SHARED, fd_mem_cached, (off_t)phys_addr);
	return MAP_FAILED == virt_addr ? ERROR_PTR : virt_addr;
}

void sys_physunmap_unaligned(void *virt_addr, size_t len)
{
	munmap(virt_addr, len);
}
#endif

#define PHYSM_RW	0
#define PHYSM_RO	1
#define PHYSM_NOCLEANUP	0
#define PHYSM_CLEANUP	1
#define PHYSM_EXACT	0
#define PHYSM_ROUND	1

/* Round start to nearest page boundary below and set len so that the resulting address range ends at the lowest
 * possible page boundary where the original address range is still entirely contained. It returns the
 * difference between the rounded start address and the original start address. */
static uintptr_t round_to_page_boundaries(uintptr_t *start, size_t *len)
{
	uintptr_t page_size = getpagesize();
	uintptr_t page_mask = ~(page_size-1);
	uintptr_t end = *start + *len;
	uintptr_t old_start = *start;
	msg_gspew("page_size=%" PRIxPTR "\n", page_size);
	msg_gspew("pre-rounding: start=0x%0*" PRIxPTR ", len=0x%zx, end=0x%0*" PRIxPTR "\n",
		  PRIxPTR_WIDTH, *start, *len, PRIxPTR_WIDTH, end);
	*start = *start & page_mask;
	end = (end + page_size - 1) & page_mask;
	*len = end - *start;
	msg_gspew("post-rounding: start=0x%0*" PRIxPTR ", len=0x%zx, end=0x%0*" PRIxPTR "\n",
		  PRIxPTR_WIDTH, *start, *len, PRIxPTR_WIDTH, *start + *len);
	return old_start - *start;
}
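/*
 * Worked example (editor illustration, assuming a 4 KiB page size): for
 * *start = 0x1234 and *len = 0x10 the end is 0x1244; masking the start gives
 * 0x1000 and rounding the end up gives 0x2000, so *len becomes 0x1000 and the
 * function returns the offset 0x234.
 */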

struct undo_physmap_data {
	void *virt_addr;
	size_t len;
};

static int undo_physmap(void *data)
{
	if (data == NULL) {
		msg_perr("%s: tried to physunmap without valid data!\n", __func__);
		return 1;
	}
	struct undo_physmap_data *d = data;
	physunmap_unaligned(d->virt_addr, d->len);
	free(data);
	return 0;
}

static void *physmap_common(const char *descr, uintptr_t phys_addr, size_t len, bool readonly, bool autocleanup,
			    bool round)
{
	void *virt_addr;
	uintptr_t offset = 0;

	if (len == 0) {
		msg_pspew("Not mapping %s, zero size at 0x%0*" PRIxPTR ".\n", descr, PRIxPTR_WIDTH, phys_addr);
		return ERROR_PTR;
	}

	if (round)
		offset = round_to_page_boundaries(&phys_addr, &len);

	if (readonly)
		virt_addr = sys_physmap_ro_cached(phys_addr, len);
	else
		virt_addr = sys_physmap_rw_uncached(phys_addr, len);

	if (ERROR_PTR == virt_addr) {
		if (NULL == descr)
			descr = "memory";
		msg_perr("Error accessing %s, 0x%zx bytes at 0x%0*" PRIxPTR "\n",
			 descr, len, PRIxPTR_WIDTH, phys_addr);
		msg_perr(MEM_DEV " mmap failed: %s\n", strerror(errno));
#ifdef __linux__
		if (EINVAL == errno) {
			msg_perr("In Linux this error can be caused by the CONFIG_NONPROMISC_DEVMEM (<2.6.27),\n");
			msg_perr("CONFIG_STRICT_DEVMEM (>=2.6.27) and CONFIG_X86_PAT kernel options.\n");
			msg_perr("Please check if either is enabled in your kernel before reporting a failure.\n");
			msg_perr("You can override CONFIG_X86_PAT at boot with the nopat kernel parameter but\n");
			msg_perr("disabling the other option unfortunately requires a kernel recompile. Sorry!\n");
		}
#elif defined (__OpenBSD__)
		msg_perr("Please set securelevel=-1 in /etc/rc.securelevel "
			 "and reboot, or reboot into\n"
			 "single user mode.\n");
#endif
		return ERROR_PTR;
	}

	if (autocleanup) {
		struct undo_physmap_data *d = malloc(sizeof(struct undo_physmap_data));
		if (d == NULL) {
			msg_perr("%s: Out of memory!\n", __func__);
			physunmap_unaligned(virt_addr, len);
			return ERROR_PTR;
		}

		d->virt_addr = virt_addr;
		d->len = len;
		if (register_shutdown(undo_physmap, d) != 0) {
			msg_perr("%s: Could not register shutdown function!\n", __func__);
			physunmap_unaligned(virt_addr, len);
			return ERROR_PTR;
		}
	}

	return virt_addr + offset;
}

void physunmap_unaligned(void *virt_addr, size_t len)
{
	/* No need to check for zero size, such mappings would have yielded ERROR_PTR. */
	if (virt_addr == ERROR_PTR) {
		msg_perr("Trying to unmap a nonexisting mapping!\n"
			 "Please report a bug at flashrom@flashrom.org\n");
		return;
	}

	sys_physunmap_unaligned(virt_addr, len);
}

void physunmap(void *virt_addr, size_t len)
{
	uintptr_t tmp;

	/* No need to check for zero size, such mappings would have yielded ERROR_PTR. */
	if (virt_addr == ERROR_PTR) {
		msg_perr("Trying to unmap a nonexisting mapping!\n"
			 "Please report a bug at flashrom@flashrom.org\n");
		return;
	}
	tmp = (uintptr_t)virt_addr;
	/* We assume that the virtual address of a page-aligned physical address is page-aligned as well. By
	 * extension, rounding a virtual unaligned address as returned by physmap should yield the same offset
	 * between rounded and original virtual address as between rounded and original physical address.
	 */
	round_to_page_boundaries(&tmp, &len);
	virt_addr = (void *)tmp;
	physunmap_unaligned(virt_addr, len);
}

void *physmap(const char *descr, uintptr_t phys_addr, size_t len)
{
	return physmap_common(descr, phys_addr, len, PHYSM_RW, PHYSM_NOCLEANUP, PHYSM_ROUND);
}

void *rphysmap(const char *descr, uintptr_t phys_addr, size_t len)
{
	return physmap_common(descr, phys_addr, len, PHYSM_RW, PHYSM_CLEANUP, PHYSM_ROUND);
}

void *physmap_ro(const char *descr, uintptr_t phys_addr, size_t len)
{
	return physmap_common(descr, phys_addr, len, PHYSM_RO, PHYSM_NOCLEANUP, PHYSM_ROUND);
}

void *physmap_ro_unaligned(const char *descr, uintptr_t phys_addr, size_t len)
{
	return physmap_common(descr, phys_addr, len, PHYSM_RO, PHYSM_NOCLEANUP, PHYSM_EXACT);
}

/* MSR abstraction implementations for Linux, OpenBSD, FreeBSD/Dragonfly, OS X, libpayload
 * and a non-working default implementation at the bottom. See also hwaccess.h for some (re)declarations. */
#if defined(__i386__) || defined(__x86_64__)

#ifdef __linux__
/*
 * Reading and writing MSRs, however, requires the rdmsr/wrmsr instructions,
 * which are ring-0 privileged instructions, so only the kernel can do the
 * read/write. These functions therefore require that the msr kernel module
 * be loaded to access these instructions from user space using the device
 * /dev/cpu/0/msr.
 */

static int fd_msr = -1;

msr_t rdmsr(int addr)
{
	uint32_t buf[2];
	msr_t msr = { 0xffffffff, 0xffffffff };

	if (lseek(fd_msr, (off_t) addr, SEEK_SET) == -1) {
		msg_perr("Could not lseek() MSR: %s\n", strerror(errno));
		close(fd_msr);
		exit(1);
	}

	if (read(fd_msr, buf, 8) == 8) {
		msr.lo = buf[0];
		msr.hi = buf[1];
		return msr;
	}

	if (errno != EIO) {
		// A severe error.
		msg_perr("Could not read() MSR: %s\n", strerror(errno));
		close(fd_msr);
		exit(1);
	}

	return msr;
}

int wrmsr(int addr, msr_t msr)
{
	uint32_t buf[2];
	buf[0] = msr.lo;
	buf[1] = msr.hi;

	if (lseek(fd_msr, (off_t) addr, SEEK_SET) == -1) {
		msg_perr("Could not lseek() MSR: %s\n", strerror(errno));
		close(fd_msr);
		exit(1);
	}

	if (write(fd_msr, buf, 8) != 8 && errno != EIO) {
		msg_perr("Could not write() MSR: %s\n", strerror(errno));
		close(fd_msr);
		exit(1);
	}

	/* Some MSRs must not be written. */
	if (errno == EIO)
		return -1;

	return 0;
}

int setup_cpu_msr(int cpu)
{
	char msrfilename[64];
	memset(msrfilename, 0, sizeof(msrfilename));
	snprintf(msrfilename, sizeof(msrfilename), "/dev/cpu/%d/msr", cpu);

	if (fd_msr != -1) {
		msg_pinfo("MSR was already initialized\n");
		return -1;
	}

	fd_msr = open(msrfilename, O_RDWR);

	if (fd_msr < 0) {
		msg_perr("Error while opening %s: %s\n", msrfilename, strerror(errno));
		msg_pinfo("Did you run 'modprobe msr'?\n");
		return -1;
	}

	return 0;
}

void cleanup_cpu_msr(void)
{
	if (fd_msr == -1) {
		msg_pinfo("No MSR initialized.\n");
		return;
	}

	close(fd_msr);

	/* Clear MSR file descriptor. */
	fd_msr = -1;
}
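/*
 * Usage sketch (editor illustration, not part of the original file): callers
 * reassemble the two 32-bit halves into one 64-bit value. MSR 0x1b
 * (IA32_APIC_BASE) is used here purely as an example address.
 *
 *	if (setup_cpu_msr(0) == 0) {
 *		msr_t m = rdmsr(0x1b);
 *		uint64_t apic_base = ((uint64_t)m.hi << 32) | m.lo;
 *		cleanup_cpu_msr();
 *	}
 */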
#elif defined(__OpenBSD__) && defined (__i386__) /* This only works on certain AMD Geode LX systems, see amdmsr(4). */
#include <sys/ioctl.h>
#include <machine/amdmsr.h>

static int fd_msr = -1;

msr_t rdmsr(int addr)
{
	struct amdmsr_req args;

	msr_t msr = { 0xffffffff, 0xffffffff };

	args.addr = (uint32_t)addr;

	if (ioctl(fd_msr, RDMSR, &args) < 0) {
		msg_perr("Error while executing RDMSR ioctl: %s\n", strerror(errno));
		close(fd_msr);
		exit(1);
	}

	msr.lo = args.val & 0xffffffff;
	msr.hi = args.val >> 32;

	return msr;
}

int wrmsr(int addr, msr_t msr)
{
	struct amdmsr_req args;

	args.addr = addr;
	args.val = (((uint64_t)msr.hi) << 32) | msr.lo;

	if (ioctl(fd_msr, WRMSR, &args) < 0) {
		msg_perr("Error while executing WRMSR ioctl: %s\n", strerror(errno));
		close(fd_msr);
		exit(1);
	}

	return 0;
}

int setup_cpu_msr(int cpu)
{
	char msrfilename[64];
	memset(msrfilename, 0, sizeof(msrfilename));
	snprintf(msrfilename, sizeof(msrfilename), "/dev/amdmsr");

	if (fd_msr != -1) {
		msg_pinfo("MSR was already initialized\n");
		return -1;
	}

	fd_msr = open(msrfilename, O_RDWR);

	if (fd_msr < 0) {
		msg_perr("Error while opening %s: %s\n", msrfilename, strerror(errno));
		return -1;
	}

	return 0;
}

void cleanup_cpu_msr(void)
{
	if (fd_msr == -1) {
		msg_pinfo("No MSR initialized.\n");
		return;
	}

	close(fd_msr);

	/* Clear MSR file descriptor. */
	fd_msr = -1;
}

#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
#include <sys/ioctl.h>

typedef struct {
	int msr;
	uint64_t data;
} cpu_msr_args_t;
#define CPU_RDMSR _IOWR('c', 1, cpu_msr_args_t)
#define CPU_WRMSR _IOWR('c', 2, cpu_msr_args_t)

static int fd_msr = -1;

msr_t rdmsr(int addr)
{
	cpu_msr_args_t args;

	msr_t msr = { 0xffffffff, 0xffffffff };

	args.msr = addr;

	if (ioctl(fd_msr, CPU_RDMSR, &args) < 0) {
		msg_perr("Error while executing CPU_RDMSR ioctl: %s\n", strerror(errno));
		close(fd_msr);
		exit(1);
	}

	msr.lo = args.data & 0xffffffff;
	msr.hi = args.data >> 32;

	return msr;
}

int wrmsr(int addr, msr_t msr)
{
	cpu_msr_args_t args;

	args.msr = addr;
	args.data = (((uint64_t)msr.hi) << 32) | msr.lo;

	if (ioctl(fd_msr, CPU_WRMSR, &args) < 0) {
		msg_perr("Error while executing CPU_WRMSR ioctl: %s\n", strerror(errno));
		close(fd_msr);
		exit(1);
	}

	return 0;
}

int setup_cpu_msr(int cpu)
{
	char msrfilename[64];
	memset(msrfilename, 0, sizeof(msrfilename));
	snprintf(msrfilename, sizeof(msrfilename), "/dev/cpu%d", cpu);

	if (fd_msr != -1) {
		msg_pinfo("MSR was already initialized\n");
		return -1;
	}

	fd_msr = open(msrfilename, O_RDWR);

	if (fd_msr < 0) {
		msg_perr("Error while opening %s: %s\n", msrfilename, strerror(errno));
		msg_pinfo("Did you install ports/sysutils/devcpu?\n");
		return -1;
	}

	return 0;
}

void cleanup_cpu_msr(void)
{
	if (fd_msr == -1) {
		msg_pinfo("No MSR initialized.\n");
		return;
	}

	close(fd_msr);

	/* Clear MSR file descriptor. */
	fd_msr = -1;
}

#elif defined(__MACH__) && defined(__APPLE__)
/* rdmsr() and wrmsr() are provided by DirectHW which needs neither setup nor cleanup. */
int setup_cpu_msr(int cpu)
{
	// Always succeed for now
	return 0;
}

void cleanup_cpu_msr(void)
{
	// Nothing, yet.
}
#elif defined(__LIBPAYLOAD__)
msr_t libpayload_rdmsr(int addr)
{
	msr_t msr;
	unsigned long long val = _rdmsr(addr);
	msr.lo = val & 0xffffffff;
	msr.hi = val >> 32;
	return msr;
}

int libpayload_wrmsr(int addr, msr_t msr)
{
	_wrmsr(addr, msr.lo | ((unsigned long long)msr.hi << 32));
	return 0;
}

int setup_cpu_msr(int cpu)
{
	return 0;
}

void cleanup_cpu_msr(void)
{
}
#else
/* default MSR implementation */
msr_t rdmsr(int addr)
{
	msr_t ret = { 0xffffffff, 0xffffffff };

	return ret;
}

int wrmsr(int addr, msr_t msr)
{
	return -1;
}

int setup_cpu_msr(int cpu)
{
	msg_pinfo("No MSR support for your OS yet.\n");
	return -1;
}

void cleanup_cpu_msr(void)
{
	// Nothing, yet.
}
#endif // OS switches for MSR code
#else // x86
/* Does MSR exist on non-x86 architectures? */
#endif // arch switches for MSR code
